gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39 #include "rtlanal.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
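/* Illustrative sketch (not part of the original source): if LOW is the
   unsigned image of a negative value, say (unsigned HOST_WIDE_INT) -5,
   the cast back to HOST_WIDE_INT is negative and HWI_SIGN_EXTEND yields
   HOST_WIDE_INT_M1, i.e. an all-ones high word; for LOW == 5 it yields
   HOST_WIDE_INT_0.  */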
50 static bool plus_minus_operand_p (const_rtx);
52 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
54 static rtx
55 neg_poly_int_rtx (machine_mode mode, const_rtx i)
57 return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
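/* A worked example (a sketch): since a CONST_INT satisfies poly_int_rtx_p,
   neg_poly_int_rtx (SImode, GEN_INT (5)) negates the value through
   wi::to_poly_wide and returns (const_int -5) via immed_wide_int_const.  */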
60 /* Test whether expression, X, is an immediate constant that represents
61 the most significant bit of machine mode MODE. */
63 bool
64 mode_signbit_p (machine_mode mode, const_rtx x)
66 unsigned HOST_WIDE_INT val;
67 unsigned int width;
68 scalar_int_mode int_mode;
70 if (!is_int_mode (mode, &int_mode))
71 return false;
73 width = GET_MODE_PRECISION (int_mode);
74 if (width == 0)
75 return false;
77 if (width <= HOST_BITS_PER_WIDE_INT
78 && CONST_INT_P (x))
79 val = INTVAL (x);
80 #if TARGET_SUPPORTS_WIDE_INT
81 else if (CONST_WIDE_INT_P (x))
83 unsigned int i;
84 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
85 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
86 return false;
87 for (i = 0; i < elts - 1; i++)
88 if (CONST_WIDE_INT_ELT (x, i) != 0)
89 return false;
90 val = CONST_WIDE_INT_ELT (x, elts - 1);
91 width %= HOST_BITS_PER_WIDE_INT;
92 if (width == 0)
93 width = HOST_BITS_PER_WIDE_INT;
95 #else
96 else if (width <= HOST_BITS_PER_DOUBLE_INT
97 && CONST_DOUBLE_AS_INT_P (x)
98 && CONST_DOUBLE_LOW (x) == 0)
100 val = CONST_DOUBLE_HIGH (x);
101 width -= HOST_BITS_PER_WIDE_INT;
103 #endif
104 else
105 /* X is not an integer constant. */
106 return false;
108 if (width < HOST_BITS_PER_WIDE_INT)
109 val &= (HOST_WIDE_INT_1U << width) - 1;
110 return val == (HOST_WIDE_INT_1U << (width - 1));
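/* Illustrative use (a sketch): with 32-bit SImode,
   mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))
   returns true, since after masking to the mode only bit 31 is set;
   constants with any other bit pattern return false.  */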
113 /* Test whether VAL is equal to the most significant bit of mode MODE
114 (after masking with the mode mask of MODE). Returns false if the
115 precision of MODE is too large to handle. */
117 bool
118 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
120 unsigned int width;
121 scalar_int_mode int_mode;
123 if (!is_int_mode (mode, &int_mode))
124 return false;
126 width = GET_MODE_PRECISION (int_mode);
127 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
128 return false;
130 val &= GET_MODE_MASK (int_mode);
131 return val == (HOST_WIDE_INT_1U << (width - 1));
134 /* Test whether the most significant bit of mode MODE is set in VAL.
135 Returns false if the precision of MODE is too large to handle. */
136 bool
137 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
139 unsigned int width;
141 scalar_int_mode int_mode;
142 if (!is_int_mode (mode, &int_mode))
143 return false;
145 width = GET_MODE_PRECISION (int_mode);
146 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
147 return false;
149 val &= HOST_WIDE_INT_1U << (width - 1);
150 return val != 0;
153 /* Test whether the most significant bit of mode MODE is clear in VAL.
154 Returns false if the precision of MODE is too large to handle. */
155 bool
156 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
158 unsigned int width;
160 scalar_int_mode int_mode;
161 if (!is_int_mode (mode, &int_mode))
162 return false;
164 width = GET_MODE_PRECISION (int_mode);
165 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166 return false;
168 val &= HOST_WIDE_INT_1U << (width - 1);
169 return val == 0;
172 /* Make a binary operation by properly ordering the operands and
173 seeing if the expression folds. */
176 simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
177 rtx op0, rtx op1)
179 rtx tem;
181 /* If this simplifies, do it. */
182 tem = simplify_binary_operation (code, mode, op0, op1);
183 if (tem)
184 return tem;
186 /* Put complex operands first and constants second if commutative. */
187 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
188 && swap_commutative_operands_p (op0, op1))
189 std::swap (op0, op1);
191 return gen_rtx_fmt_ee (code, mode, op0, op1);
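/* Example behaviour (a sketch; REG stands for an arbitrary pseudo): when
   nothing folds, commutative operands are reordered so the constant comes
   second, e.g. simplify_gen_binary (PLUS, SImode, const1_rtx, REG) builds
   (plus:SI REG (const_int 1)); with two CONST_INT operands the same call
   folds to a single CONST_INT instead.  */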
194 /* If X is a MEM referencing the constant pool, return the real value.
195 Otherwise return X. */
197 avoid_constant_pool_reference (rtx x)
199 rtx c, tmp, addr;
200 machine_mode cmode;
201 poly_int64 offset = 0;
203 switch (GET_CODE (x))
205 case MEM:
206 break;
208 case FLOAT_EXTEND:
209 /* Handle float extensions of constant pool references. */
210 tmp = XEXP (x, 0);
211 c = avoid_constant_pool_reference (tmp);
212 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
213 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
214 GET_MODE (x));
215 return x;
217 default:
218 return x;
221 if (GET_MODE (x) == BLKmode)
222 return x;
224 addr = XEXP (x, 0);
226 /* Call target hook to avoid the effects of -fpic etc.... */
227 addr = targetm.delegitimize_address (addr);
229 /* Split the address into a base and integer offset. */
230 addr = strip_offset (addr, &offset);
232 if (GET_CODE (addr) == LO_SUM)
233 addr = XEXP (addr, 1);
235 /* If this is a constant pool reference, we can turn it into its
236 constant and hope that simplifications happen. */
237 if (GET_CODE (addr) == SYMBOL_REF
238 && CONSTANT_POOL_ADDRESS_P (addr))
240 c = get_pool_constant (addr);
241 cmode = get_pool_mode (addr);
243 /* If we're accessing the constant in a different mode than it was
244 originally stored, attempt to fix that up via subreg simplifications.
245 If that fails we have no choice but to return the original memory. */
246 if (known_eq (offset, 0) && cmode == GET_MODE (x))
247 return c;
248 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
250 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251 if (tem && CONSTANT_P (tem))
252 return tem;
256 return x;
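/* Illustrative sketch: for a MEM:DF whose (delegitimized) address is a
   SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set and whose pool entry is the
   DFmode constant 1.0, this returns that CONST_DOUBLE directly; when the
   offset or access mode does not match the pool entry and no constant
   subreg can be formed, the original MEM is returned unchanged.  */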
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 machine_mode mode = GET_MODE (x);
274 poly_int64 offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
294 tree toffset;
295 int unsignedp, reversep, volatilep = 0;
297 decl
298 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
299 &unsignedp, &reversep, &volatilep);
300 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
301 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
302 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
303 decl = NULL;
304 else
305 offset += bytepos + toffset_val;
306 break;
310 if (decl
311 && mode == GET_MODE (x)
312 && VAR_P (decl)
313 && (TREE_STATIC (decl)
314 || DECL_THREAD_LOCAL_P (decl))
315 && DECL_RTL_SET_P (decl)
316 && MEM_P (DECL_RTL (decl)))
318 rtx newx;
320 offset += MEM_OFFSET (x);
322 newx = DECL_RTL (decl);
324 if (MEM_P (newx))
326 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
327 poly_int64 n_offset, o_offset;
329 /* Avoid creating a new MEM needlessly if we already had
330 the same address. We do if there's no OFFSET and the
331 old address X is identical to NEWX, or if X is of the
332 form (plus NEWX OFFSET), or the NEWX is of the form
333 (plus Y (const_int Z)) and X is that with the offset
334 added: (plus Y (const_int Z+OFFSET)). */
335 n = strip_offset (n, &n_offset);
336 o = strip_offset (o, &o_offset);
337 if (!(known_eq (o_offset, n_offset + offset)
338 && rtx_equal_p (o, n)))
339 x = adjust_address_nv (newx, mode, offset);
341 else if (GET_MODE (x) == GET_MODE (newx)
342 && known_eq (offset, 0))
343 x = newx;
347 return x;
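/* Illustrative sketch: when X is a MEM whose MEM_EXPR is a TREE_STATIC
   VAR_DECL whose DECL_RTL is itself a MEM, and the recorded MEM_OFFSET is
   not already reflected in X's address, the MEM is rebuilt with
   adjust_address_nv on the variable's canonical address; otherwise X is
   returned unchanged.  */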
350 /* Make a unary operation by first seeing if it folds and otherwise making
351 the specified operation. */
354 simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
355 machine_mode op_mode)
357 rtx tem;
359 /* If this simplifies, use it. */
360 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
361 return tem;
363 return gen_rtx_fmt_e (code, mode, op);
366 /* Likewise for ternary operations. */
369 simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
370 machine_mode op0_mode,
371 rtx op0, rtx op1, rtx op2)
373 rtx tem;
375 /* If this simplifies, use it. */
376 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
377 op0, op1, op2)) != 0)
378 return tem;
380 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
383 /* Likewise, for relational operations.
384 CMP_MODE specifies mode comparison is done in. */
387 simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
388 machine_mode cmp_mode,
389 rtx op0, rtx op1)
391 rtx tem;
393 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
394 op0, op1)) != 0)
395 return tem;
397 return gen_rtx_fmt_ee (code, mode, op0, op1);
400 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
401 and simplify the result. If FN is non-NULL, call this callback on each
402 X; if it returns non-NULL, replace X with its return value and simplify the
403 result. */
406 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
407 rtx (*fn) (rtx, const_rtx, void *), void *data)
409 enum rtx_code code = GET_CODE (x);
410 machine_mode mode = GET_MODE (x);
411 machine_mode op_mode;
412 const char *fmt;
413 rtx op0, op1, op2, newx, op;
414 rtvec vec, newvec;
415 int i, j;
417 if (__builtin_expect (fn != NULL, 0))
419 newx = fn (x, old_rtx, data);
420 if (newx)
421 return newx;
423 else if (rtx_equal_p (x, old_rtx))
424 return copy_rtx ((rtx) data);
426 switch (GET_RTX_CLASS (code))
428 case RTX_UNARY:
429 op0 = XEXP (x, 0);
430 op_mode = GET_MODE (op0);
431 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
432 if (op0 == XEXP (x, 0))
433 return x;
434 return simplify_gen_unary (code, mode, op0, op_mode);
436 case RTX_BIN_ARITH:
437 case RTX_COMM_ARITH:
438 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
439 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
440 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
441 return x;
442 return simplify_gen_binary (code, mode, op0, op1);
444 case RTX_COMPARE:
445 case RTX_COMM_COMPARE:
446 op0 = XEXP (x, 0);
447 op1 = XEXP (x, 1);
448 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
449 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
450 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return simplify_gen_relational (code, mode, op_mode, op0, op1);
455 case RTX_TERNARY:
456 case RTX_BITFIELD_OPS:
457 op0 = XEXP (x, 0);
458 op_mode = GET_MODE (op0);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
461 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
463 return x;
464 if (op_mode == VOIDmode)
465 op_mode = GET_MODE (op0);
466 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
468 case RTX_EXTRA:
469 if (code == SUBREG)
471 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
472 if (op0 == SUBREG_REG (x))
473 return x;
474 op0 = simplify_gen_subreg (GET_MODE (x), op0,
475 GET_MODE (SUBREG_REG (x)),
476 SUBREG_BYTE (x));
477 return op0 ? op0 : x;
479 break;
481 case RTX_OBJ:
482 if (code == MEM)
484 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
485 if (op0 == XEXP (x, 0))
486 return x;
487 return replace_equiv_address_nv (x, op0);
489 else if (code == LO_SUM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
494 /* (lo_sum (high x) y) -> y where x and y have the same base. */
495 if (GET_CODE (op0) == HIGH)
497 rtx base0, base1, offset0, offset1;
498 split_const (XEXP (op0, 0), &base0, &offset0);
499 split_const (op1, &base1, &offset1);
500 if (rtx_equal_p (base0, base1))
501 return op1;
504 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
505 return x;
506 return gen_rtx_LO_SUM (mode, op0, op1);
508 break;
510 default:
511 break;
514 newx = x;
515 fmt = GET_RTX_FORMAT (code);
516 for (i = 0; fmt[i]; i++)
517 switch (fmt[i])
519 case 'E':
520 vec = XVEC (x, i);
521 newvec = XVEC (newx, i);
522 for (j = 0; j < GET_NUM_ELEM (vec); j++)
524 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
525 old_rtx, fn, data);
526 if (op != RTVEC_ELT (vec, j))
528 if (newvec == vec)
530 newvec = shallow_copy_rtvec (vec);
531 if (x == newx)
532 newx = shallow_copy_rtx (x);
533 XVEC (newx, i) = newvec;
535 RTVEC_ELT (newvec, j) = op;
538 break;
540 case 'e':
541 if (XEXP (x, i))
543 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
544 if (op != XEXP (x, i))
546 if (x == newx)
547 newx = shallow_copy_rtx (x);
548 XEXP (newx, i) = op;
551 break;
553 return newx;
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 resulting RTX. Return a new RTX which is as simplified as possible. */
560 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
562 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
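/* Usage sketch (hypothetical operands): replacing (reg:SI 100) with
   (const_int 8) in (plus:SI (reg:SI 100) (const_int 4)) through
   simplify_replace_rtx rebuilds the PLUS with simplify_gen_binary and
   therefore returns (const_int 12).  */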
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 Only handle cases where the truncated value is inherently an rvalue.
568 RTL provides two ways of truncating a value:
570 1. a lowpart subreg. This form is only a truncation when both
571 the outer and inner modes (here MODE and OP_MODE respectively)
572 are scalar integers, and only then when the subreg is used as
573 an rvalue.
575 It is only valid to form such truncating subregs if the
576 truncation requires no action by the target. The onus for
577 proving this is on the creator of the subreg -- e.g. the
578 caller to simplify_subreg or simplify_gen_subreg -- and typically
579 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 2. a TRUNCATE. This form handles both scalar and compound integers.
583 The first form is preferred where valid. However, the TRUNCATE
584 handling in simplify_unary_operation turns the second form into the
585 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 so it is generally safe to form rvalue truncations using:
588 simplify_gen_unary (TRUNCATE, ...)
590 and leave simplify_unary_operation to work out which representation
591 should be used.
593 Because of the proof requirements on (1), simplify_truncation must
594 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 regardless of whether the outer truncation came from a SUBREG or a
596 TRUNCATE. For example, if the caller has proven that an SImode
597 truncation of:
599 (and:DI X Y)
601 is a no-op and can be represented as a subreg, it does not follow
602 that SImode truncations of X and Y are also no-ops. On a target
603 like 64-bit MIPS that requires SImode values to be stored in
604 sign-extended form, an SImode truncation of:
606 (and:DI (reg:DI X) (const_int 63))
608 is trivially a no-op because only the lower 6 bits can be set.
609 However, X is still an arbitrary 64-bit number and so we cannot
610 assume that truncating it too is a no-op. */
613 simplify_context::simplify_truncation (machine_mode mode, rtx op,
614 machine_mode op_mode)
616 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
617 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
618 scalar_int_mode int_mode, int_op_mode, subreg_mode;
620 gcc_assert (precision <= op_precision);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
631 mode. */
632 machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
643 /* If the machine can perform operations in the truncated mode, distribute
644 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
646 if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
648 && (GET_CODE (op) == PLUS
649 || GET_CODE (op) == MINUS
650 || GET_CODE (op) == MULT))
652 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
653 if (op0)
655 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
656 if (op1)
657 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
661 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
662 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
663 the outer subreg is effectively a truncation to the original mode. */
664 if ((GET_CODE (op) == LSHIFTRT
665 || GET_CODE (op) == ASHIFTRT)
666 /* Ensure that OP_MODE is at least twice as wide as MODE
667 to avoid the possibility that an outer LSHIFTRT shifts by more
668 than the sign extension's sign_bit_copies and introduces zeros
669 into the high bits of the result. */
670 && 2 * precision <= op_precision
671 && CONST_INT_P (XEXP (op, 1))
672 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
673 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
674 && UINTVAL (XEXP (op, 1)) < precision)
675 return simplify_gen_binary (ASHIFTRT, mode,
676 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
678 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
679 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
680 the outer subreg is effectively a truncation to the original mode. */
681 if ((GET_CODE (op) == LSHIFTRT
682 || GET_CODE (op) == ASHIFTRT)
683 && CONST_INT_P (XEXP (op, 1))
684 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
685 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
686 && UINTVAL (XEXP (op, 1)) < precision)
687 return simplify_gen_binary (LSHIFTRT, mode,
688 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
690 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
691 (ashift:QI (x:QI) C), where C is a suitable small constant and
692 the outer subreg is effectively a truncation to the original mode. */
693 if (GET_CODE (op) == ASHIFT
694 && CONST_INT_P (XEXP (op, 1))
695 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
697 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698 && UINTVAL (XEXP (op, 1)) < precision)
699 return simplify_gen_binary (ASHIFT, mode,
700 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
703 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
704 and C2. */
705 if (GET_CODE (op) == AND
706 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
707 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
708 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
709 && CONST_INT_P (XEXP (op, 1)))
711 rtx op0 = (XEXP (XEXP (op, 0), 0));
712 rtx shift_op = XEXP (XEXP (op, 0), 1);
713 rtx mask_op = XEXP (op, 1);
714 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
715 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
717 if (shift < precision
718 /* If doing this transform works for an X with all bits set,
719 it works for any X. */
720 && ((GET_MODE_MASK (mode) >> shift) & mask)
721 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
722 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
723 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
725 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
726 return simplify_gen_binary (AND, mode, op0, mask_op);
730 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
731 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
732 changing len. */
733 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
734 && REG_P (XEXP (op, 0))
735 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
736 && CONST_INT_P (XEXP (op, 1))
737 && CONST_INT_P (XEXP (op, 2)))
739 rtx op0 = XEXP (op, 0);
740 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
741 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
742 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
744 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
745 if (op0)
747 pos -= op_precision - precision;
748 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
749 XEXP (op, 1), GEN_INT (pos));
752 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
754 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 if (op0)
756 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
757 XEXP (op, 1), XEXP (op, 2));
761 /* Recognize a word extraction from a multi-word subreg. */
762 if ((GET_CODE (op) == LSHIFTRT
763 || GET_CODE (op) == ASHIFTRT)
764 && SCALAR_INT_MODE_P (mode)
765 && SCALAR_INT_MODE_P (op_mode)
766 && precision >= BITS_PER_WORD
767 && 2 * precision <= op_precision
768 && CONST_INT_P (XEXP (op, 1))
769 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
770 && UINTVAL (XEXP (op, 1)) < op_precision)
772 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
773 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
774 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
775 (WORDS_BIG_ENDIAN
776 ? byte - shifted_bytes
777 : byte + shifted_bytes));
780 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
781 and try replacing the TRUNCATE and shift with it. Don't do this
782 if the MEM has a mode-dependent address. */
783 if ((GET_CODE (op) == LSHIFTRT
784 || GET_CODE (op) == ASHIFTRT)
785 && is_a <scalar_int_mode> (mode, &int_mode)
786 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
787 && MEM_P (XEXP (op, 0))
788 && CONST_INT_P (XEXP (op, 1))
789 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
790 && INTVAL (XEXP (op, 1)) > 0
791 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
792 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
793 MEM_ADDR_SPACE (XEXP (op, 0)))
794 && ! MEM_VOLATILE_P (XEXP (op, 0))
795 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
796 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
798 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return adjust_address_nv (XEXP (op, 0), int_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
807 (OP:SI foo:SI) if OP is NEG or ABS. */
808 if ((GET_CODE (op) == ABS
809 || GET_CODE (op) == NEG)
810 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
811 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
812 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813 return simplify_gen_unary (GET_CODE (op), mode,
814 XEXP (XEXP (op, 0), 0), mode);
816 /* Simplifications of (truncate:A (subreg:B X 0)). */
817 if (GET_CODE (op) == SUBREG
818 && is_a <scalar_int_mode> (mode, &int_mode)
819 && SCALAR_INT_MODE_P (op_mode)
820 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
821 && subreg_lowpart_p (op))
823 /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X). */
824 if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
826 rtx inner = XEXP (SUBREG_REG (op), 0);
827 if (GET_MODE_PRECISION (int_mode)
828 <= GET_MODE_PRECISION (subreg_mode))
829 return simplify_gen_unary (TRUNCATE, int_mode, inner,
830 GET_MODE (inner));
831 else
832 /* If subreg above is paradoxical and C is narrower
833 than A, return (subreg:A (truncate:C X) 0). */
834 return simplify_gen_subreg (int_mode, SUBREG_REG (op),
835 subreg_mode, 0);
838 /* Simplifications of (truncate:A (subreg:B X:C 0)) with
839 paradoxical subregs (B is wider than C). */
840 if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
842 unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
843 unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
844 if (int_op_prec > subreg_prec)
846 if (int_mode == subreg_mode)
847 return SUBREG_REG (op);
848 if (GET_MODE_PRECISION (int_mode) < subreg_prec)
849 return simplify_gen_unary (TRUNCATE, int_mode,
850 SUBREG_REG (op), subreg_mode);
852 /* Simplification of (truncate:A (subreg:B X:C 0)) where
853 A is narrower than B and B is narrower than C. */
854 else if (int_op_prec < subreg_prec
855 && GET_MODE_PRECISION (int_mode) < int_op_prec)
856 return simplify_gen_unary (TRUNCATE, int_mode,
857 SUBREG_REG (op), subreg_mode);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is already all ones when
867 truncated to mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
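/* Worked instance of the extension rule above (a sketch): asked to
   simplify (truncate:QI (sign_extend:SI (reg:QI R))), the ZERO_EXTEND/
   SIGN_EXTEND branch sees ORIGMODE == QImode == MODE and returns
   (reg:QI R); if MODE were HImode instead, the same branch would return
   (sign_extend:HI (reg:QI R)).  */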
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
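/* Illustrative sketch: for OP = (float:SF (reg:SI R)), OUT_BITS is
   significand_size (SFmode), i.e. 24; if nonzero_bits/num_sign_bit_copies
   show that R needs at most 24 significant bits (say R is a zero-extended
   16-bit value), then IN_BITS <= OUT_BITS and the conversion is reported
   exact.  */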
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
925 simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
926 rtx op)
928 enum rtx_code reversed;
929 rtx temp, elt, base, step;
930 scalar_int_mode inner, int_mode, op_mode, op0_mode;
932 switch (code)
934 case NOT:
935 /* (not (not X)) == X. */
936 if (GET_CODE (op) == NOT)
937 return XEXP (op, 0);
939 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
940 comparison is all ones. */
941 if (COMPARISON_P (op)
942 && (mode == BImode || STORE_FLAG_VALUE == -1)
943 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
944 return simplify_gen_relational (reversed, mode, VOIDmode,
945 XEXP (op, 0), XEXP (op, 1));
947 /* (not (plus X -1)) can become (neg X). */
948 if (GET_CODE (op) == PLUS
949 && XEXP (op, 1) == constm1_rtx)
950 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
952 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
953 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
954 and MODE_VECTOR_INT. */
955 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
956 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
957 CONSTM1_RTX (mode));
959 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
960 if (GET_CODE (op) == XOR
961 && CONST_INT_P (XEXP (op, 1))
962 && (temp = simplify_unary_operation (NOT, mode,
963 XEXP (op, 1), mode)) != 0)
964 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
966 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
967 if (GET_CODE (op) == PLUS
968 && CONST_INT_P (XEXP (op, 1))
969 && mode_signbit_p (mode, XEXP (op, 1))
970 && (temp = simplify_unary_operation (NOT, mode,
971 XEXP (op, 1), mode)) != 0)
972 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
975 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
976 operands other than 1, but that is not valid. We could do a
977 similar simplification for (not (lshiftrt C X)) where C is
978 just the sign bit, but this doesn't seem common enough to
979 bother with. */
980 if (GET_CODE (op) == ASHIFT
981 && XEXP (op, 0) == const1_rtx)
983 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
984 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
987 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
988 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
989 so we can perform the above simplification. */
990 if (STORE_FLAG_VALUE == -1
991 && is_a <scalar_int_mode> (mode, &int_mode)
992 && GET_CODE (op) == ASHIFTRT
993 && CONST_INT_P (XEXP (op, 1))
994 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
995 return simplify_gen_relational (GE, int_mode, VOIDmode,
996 XEXP (op, 0), const0_rtx);
999 if (partial_subreg_p (op)
1000 && subreg_lowpart_p (op)
1001 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1002 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1004 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1005 rtx x;
1007 x = gen_rtx_ROTATE (inner_mode,
1008 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1009 inner_mode),
1010 XEXP (SUBREG_REG (op), 1));
1011 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1012 if (temp)
1013 return temp;
1016 /* Apply De Morgan's laws to reduce number of patterns for machines
1017 with negating logical insns (and-not, nand, etc.). If result has
1018 only one NOT, put it first, since that is how the patterns are
1019 coded. */
1020 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1022 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1023 machine_mode op_mode;
1025 op_mode = GET_MODE (in1);
1026 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1028 op_mode = GET_MODE (in2);
1029 if (op_mode == VOIDmode)
1030 op_mode = mode;
1031 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1033 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1034 std::swap (in1, in2);
1036 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1037 mode, in1, in2);
1040 /* (not (bswap x)) -> (bswap (not x)). */
1041 if (GET_CODE (op) == BSWAP)
1043 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1044 return simplify_gen_unary (BSWAP, mode, x, mode);
1046 break;
1048 case NEG:
1049 /* (neg (neg X)) == X. */
1050 if (GET_CODE (op) == NEG)
1051 return XEXP (op, 0);
1053 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1054 If comparison is not reversible use
1055 x ? y : (neg y). */
1056 if (GET_CODE (op) == IF_THEN_ELSE)
1058 rtx cond = XEXP (op, 0);
1059 rtx true_rtx = XEXP (op, 1);
1060 rtx false_rtx = XEXP (op, 2);
1062 if ((GET_CODE (true_rtx) == NEG
1063 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1064 || (GET_CODE (false_rtx) == NEG
1065 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1067 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1068 temp = reversed_comparison (cond, mode);
1069 else
1071 temp = cond;
1072 std::swap (true_rtx, false_rtx);
1074 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1075 mode, temp, true_rtx, false_rtx);
1079 /* (neg (plus X 1)) can become (not X). */
1080 if (GET_CODE (op) == PLUS
1081 && XEXP (op, 1) == const1_rtx)
1082 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1084 /* Similarly, (neg (not X)) is (plus X 1). */
1085 if (GET_CODE (op) == NOT)
1086 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1087 CONST1_RTX (mode));
1089 /* (neg (minus X Y)) can become (minus Y X). This transformation
1090 isn't safe for modes with signed zeros, since if X and Y are
1091 both +0, (minus Y X) is the same as (minus X Y). If the
1092 rounding mode is towards +infinity (or -infinity) then the two
1093 expressions will be rounded differently. */
1094 if (GET_CODE (op) == MINUS
1095 && !HONOR_SIGNED_ZEROS (mode)
1096 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1097 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1099 if (GET_CODE (op) == PLUS
1100 && !HONOR_SIGNED_ZEROS (mode)
1101 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1103 /* (neg (plus A C)) is simplified to (minus -C A). */
1104 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1105 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1107 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1108 if (temp)
1109 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1112 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1113 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1114 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1117 /* (neg (mult A B)) becomes (mult A (neg B)).
1118 This works even for floating-point values. */
1119 if (GET_CODE (op) == MULT
1120 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1122 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1123 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1126 /* NEG commutes with ASHIFT since it is multiplication. Only do
1127 this if we can then eliminate the NEG (e.g., if the operand
1128 is a constant). */
1129 if (GET_CODE (op) == ASHIFT)
1131 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1132 if (temp)
1133 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1136 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1137 C is equal to the width of MODE minus 1. */
1138 if (GET_CODE (op) == ASHIFTRT
1139 && CONST_INT_P (XEXP (op, 1))
1140 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1141 return simplify_gen_binary (LSHIFTRT, mode,
1142 XEXP (op, 0), XEXP (op, 1));
1144 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1145 C is equal to the width of MODE minus 1. */
1146 if (GET_CODE (op) == LSHIFTRT
1147 && CONST_INT_P (XEXP (op, 1))
1148 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1149 return simplify_gen_binary (ASHIFTRT, mode,
1150 XEXP (op, 0), XEXP (op, 1));
1152 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1153 if (GET_CODE (op) == XOR
1154 && XEXP (op, 1) == const1_rtx
1155 && nonzero_bits (XEXP (op, 0), mode) == 1)
1156 return plus_constant (mode, XEXP (op, 0), -1);
1158 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1159 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1160 if (GET_CODE (op) == LT
1161 && XEXP (op, 1) == const0_rtx
1162 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1164 int_mode = as_a <scalar_int_mode> (mode);
1165 int isize = GET_MODE_PRECISION (inner);
1166 if (STORE_FLAG_VALUE == 1)
1168 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1177 else if (STORE_FLAG_VALUE == -1)
1179 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1180 gen_int_shift_amount (inner,
1181 isize - 1));
1182 if (int_mode == inner)
1183 return temp;
1184 if (GET_MODE_PRECISION (int_mode) > isize)
1185 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1186 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1190 if (vec_series_p (op, &base, &step))
1192 /* Only create a new series if we can simplify both parts. In other
1193 cases this isn't really a simplification, and it's not necessarily
1194 a win to replace a vector operation with a scalar operation. */
1195 scalar_mode inner_mode = GET_MODE_INNER (mode);
1196 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1197 if (base)
1199 step = simplify_unary_operation (NEG, inner_mode,
1200 step, inner_mode);
1201 if (step)
1202 return gen_vec_series (mode, base, step);
1205 break;
1207 case TRUNCATE:
1208 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1209 with the umulXi3_highpart patterns. */
1210 if (GET_CODE (op) == LSHIFTRT
1211 && GET_CODE (XEXP (op, 0)) == MULT)
1212 break;
1214 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1216 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1218 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1219 if (temp)
1220 return temp;
1222 /* We can't handle truncation to a partial integer mode here
1223 because we don't know the real bitsize of the partial
1224 integer mode. */
1225 break;
1228 if (GET_MODE (op) != VOIDmode)
1230 temp = simplify_truncation (mode, op, GET_MODE (op));
1231 if (temp)
1232 return temp;
1235 /* If we know that the value is already truncated, we can
1236 replace the TRUNCATE with a SUBREG. */
1237 if (known_eq (GET_MODE_NUNITS (mode), 1)
1238 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1239 || truncated_to_mode (mode, op)))
1241 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1242 if (temp)
1243 return temp;
1246 /* A truncate of a comparison can be replaced with a subreg if
1247 STORE_FLAG_VALUE permits. This is like the previous test,
1248 but it works even if the comparison is done in a mode larger
1249 than HOST_BITS_PER_WIDE_INT. */
1250 if (HWI_COMPUTABLE_MODE_P (mode)
1251 && COMPARISON_P (op)
1252 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
1253 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1260 /* A truncate of a memory is just loading the low part of the memory
1261 if we are not changing the meaning of the address. */
1262 if (GET_CODE (op) == MEM
1263 && !VECTOR_MODE_P (mode)
1264 && !MEM_VOLATILE_P (op)
1265 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1267 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1268 if (temp)
1269 return temp;
1272 /* Check for useless truncation. */
1273 if (GET_MODE (op) == mode)
1274 return op;
1275 break;
1277 case FLOAT_TRUNCATE:
1278 /* Check for useless truncation. */
1279 if (GET_MODE (op) == mode)
1280 return op;
1282 if (DECIMAL_FLOAT_MODE_P (mode))
1283 break;
1285 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1286 if (GET_CODE (op) == FLOAT_EXTEND
1287 && GET_MODE (XEXP (op, 0)) == mode)
1288 return XEXP (op, 0);
1290 /* (float_truncate:SF (float_truncate:DF foo:XF))
1291 = (float_truncate:SF foo:XF).
1292 This may eliminate double rounding, so it is unsafe.
1294 (float_truncate:SF (float_extend:XF foo:DF))
1295 = (float_truncate:SF foo:DF).
1297 (float_truncate:DF (float_extend:XF foo:SF))
1298 = (float_extend:DF foo:SF). */
1299 if ((GET_CODE (op) == FLOAT_TRUNCATE
1300 && flag_unsafe_math_optimizations)
1301 || GET_CODE (op) == FLOAT_EXTEND)
1302 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1303 > GET_MODE_UNIT_SIZE (mode)
1304 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1305 mode,
1306 XEXP (op, 0), mode);
1308 /* (float_truncate (float x)) is (float x) */
1309 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1310 && (flag_unsafe_math_optimizations
1311 || exact_int_to_float_conversion_p (op)))
1312 return simplify_gen_unary (GET_CODE (op), mode,
1313 XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1316 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1317 (OP:SF foo:SF) if OP is NEG or ABS. */
1318 if ((GET_CODE (op) == ABS
1319 || GET_CODE (op) == NEG)
1320 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1321 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1322 return simplify_gen_unary (GET_CODE (op), mode,
1323 XEXP (XEXP (op, 0), 0), mode);
1325 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1326 is (float_truncate:SF x). */
1327 if (GET_CODE (op) == SUBREG
1328 && subreg_lowpart_p (op)
1329 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1330 return SUBREG_REG (op);
1331 break;
1333 case FLOAT_EXTEND:
1334 /* Check for useless extension. */
1335 if (GET_MODE (op) == mode)
1336 return op;
1338 if (DECIMAL_FLOAT_MODE_P (mode))
1339 break;
1341 /* (float_extend (float_extend x)) is (float_extend x)
1343 (float_extend (float x)) is (float x) assuming that double
1344 rounding can't happen. */
1346 if (GET_CODE (op) == FLOAT_EXTEND
1347 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1348 && exact_int_to_float_conversion_p (op)))
1349 return simplify_gen_unary (GET_CODE (op), mode,
1350 XEXP (op, 0),
1351 GET_MODE (XEXP (op, 0)));
1353 break;
1355 case ABS:
1356 /* (abs (neg <foo>)) -> (abs <foo>) */
1357 if (GET_CODE (op) == NEG)
1358 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1359 GET_MODE (XEXP (op, 0)));
1361 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1362 do nothing. */
1363 if (GET_MODE (op) == VOIDmode)
1364 break;
1366 /* If operand is something known to be positive, ignore the ABS. */
1367 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1368 || val_signbit_known_clear_p (GET_MODE (op),
1369 nonzero_bits (op, GET_MODE (op))))
1370 return op;
1372 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1373 if (is_a <scalar_int_mode> (mode, &int_mode)
1374 && (num_sign_bit_copies (op, int_mode)
1375 == GET_MODE_PRECISION (int_mode)))
1376 return gen_rtx_NEG (int_mode, op);
1378 break;
1380 case FFS:
1381 /* (ffs (*_extend <X>)) = (ffs <X>) */
1382 if (GET_CODE (op) == SIGN_EXTEND
1383 || GET_CODE (op) == ZERO_EXTEND)
1384 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1385 GET_MODE (XEXP (op, 0)));
1386 break;
1388 case POPCOUNT:
1389 switch (GET_CODE (op))
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1394 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect popcount. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1405 default:
1406 break;
1408 break;
1410 case PARITY:
1411 switch (GET_CODE (op))
1413 case NOT:
1414 case BSWAP:
1415 case ZERO_EXTEND:
1416 case SIGN_EXTEND:
1417 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1418 GET_MODE (XEXP (op, 0)));
1420 case ROTATE:
1421 case ROTATERT:
1422 /* Rotations don't affect parity. */
1423 if (!side_effects_p (XEXP (op, 1)))
1424 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1425 GET_MODE (XEXP (op, 0)));
1426 break;
1428 case PARITY:
1429 /* (parity (parity x)) -> parity (x). */
1430 return op;
1432 default:
1433 break;
1435 break;
1437 case BSWAP:
1438 /* (bswap (bswap x)) -> x. */
1439 if (GET_CODE (op) == BSWAP)
1440 return XEXP (op, 0);
1441 break;
1443 case FLOAT:
1444 /* (float (sign_extend <X>)) = (float <X>). */
1445 if (GET_CODE (op) == SIGN_EXTEND)
1446 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1447 GET_MODE (XEXP (op, 0)));
1448 break;
1450 case SIGN_EXTEND:
1451 /* Check for useless extension. */
1452 if (GET_MODE (op) == mode)
1453 return op;
1455 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1456 becomes just the MINUS if its mode is MODE. This allows
1457 folding switch statements on machines using casesi (such as
1458 the VAX). */
1459 if (GET_CODE (op) == TRUNCATE
1460 && GET_MODE (XEXP (op, 0)) == mode
1461 && GET_CODE (XEXP (op, 0)) == MINUS
1462 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1463 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1464 return XEXP (op, 0);
1466 /* Extending a widening multiplication should be canonicalized to
1467 a wider widening multiplication. */
1468 if (GET_CODE (op) == MULT)
1470 rtx lhs = XEXP (op, 0);
1471 rtx rhs = XEXP (op, 1);
1472 enum rtx_code lcode = GET_CODE (lhs);
1473 enum rtx_code rcode = GET_CODE (rhs);
1475 /* Widening multiplies usually extend both operands, but sometimes
1476 they use a shift to extract a portion of a register. */
1477 if ((lcode == SIGN_EXTEND
1478 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1479 && (rcode == SIGN_EXTEND
1480 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1482 machine_mode lmode = GET_MODE (lhs);
1483 machine_mode rmode = GET_MODE (rhs);
1484 int bits;
1486 if (lcode == ASHIFTRT)
1487 /* Number of bits not shifted off the end. */
1488 bits = (GET_MODE_UNIT_PRECISION (lmode)
1489 - INTVAL (XEXP (lhs, 1)));
1490 else /* lcode == SIGN_EXTEND */
1491 /* Size of inner mode. */
1492 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1494 if (rcode == ASHIFTRT)
1495 bits += (GET_MODE_UNIT_PRECISION (rmode)
1496 - INTVAL (XEXP (rhs, 1)));
1497 else /* rcode == SIGN_EXTEND */
1498 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1500 /* We can only widen multiplies if the result is mathematically
1501 equivalent. I.e. if overflow was impossible. */
1502 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1503 return simplify_gen_binary
1504 (MULT, mode,
1505 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1506 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1510 /* Check for a sign extension of a subreg of a promoted
1511 variable, where the promotion is sign-extended, and the
1512 target mode is the same as the variable's promotion. */
1513 if (GET_CODE (op) == SUBREG
1514 && SUBREG_PROMOTED_VAR_P (op)
1515 && SUBREG_PROMOTED_SIGNED_P (op))
1517 rtx subreg = SUBREG_REG (op);
1518 machine_mode subreg_mode = GET_MODE (subreg);
1519 if (!paradoxical_subreg_p (mode, subreg_mode))
1521 temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1522 if (temp)
1524 /* Preserve SUBREG_PROMOTED_VAR_P. */
1525 if (partial_subreg_p (temp))
1527 SUBREG_PROMOTED_VAR_P (temp) = 1;
1528 SUBREG_PROMOTED_SET (temp, 1);
1530 return temp;
1533 else
1534 /* Sign-extending a sign-extended subreg. */
1535 return simplify_gen_unary (SIGN_EXTEND, mode,
1536 subreg, subreg_mode);
1539 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1540 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1541 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1543 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1544 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1545 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1546 GET_MODE (XEXP (op, 0)));
1549 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1550 is (sign_extend:M (subreg:O <X>)) if there is mode with
1551 GET_MODE_BITSIZE (N) - I bits.
1552 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1553 is similarly (zero_extend:M (subreg:O <X>)). */
1554 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1555 && GET_CODE (XEXP (op, 0)) == ASHIFT
1556 && is_a <scalar_int_mode> (mode, &int_mode)
1557 && CONST_INT_P (XEXP (op, 1))
1558 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1559 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1560 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1562 scalar_int_mode tmode;
1563 gcc_assert (GET_MODE_PRECISION (int_mode)
1564 > GET_MODE_PRECISION (op_mode));
1565 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1566 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1568 rtx inner =
1569 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1570 if (inner)
1571 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1572 ? SIGN_EXTEND : ZERO_EXTEND,
1573 int_mode, inner, tmode);
1577 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1578 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1579 if (GET_CODE (op) == LSHIFTRT
1580 && CONST_INT_P (XEXP (op, 1))
1581 && XEXP (op, 1) != const0_rtx)
1582 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1584 /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1585 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1586 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1587 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1588 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1589 wider than O. */
1590 if (GET_CODE (op) == TRUNCATE
1591 && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1592 && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1594 scalar_int_mode m_mode, n_mode, o_mode;
1595 rtx old_shift = XEXP (op, 0);
1596 if (is_a <scalar_int_mode> (mode, &m_mode)
1597 && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1598 && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1599 && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1600 == INTVAL (XEXP (old_shift, 1)))
1602 rtx new_shift = simplify_gen_binary (ASHIFTRT,
1603 GET_MODE (old_shift),
1604 XEXP (old_shift, 0),
1605 XEXP (old_shift, 1));
1606 if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1607 return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1608 GET_MODE (new_shift));
1609 if (mode != GET_MODE (new_shift))
1610 return simplify_gen_unary (TRUNCATE, mode, new_shift,
1611 GET_MODE (new_shift));
1612 return new_shift;
1616 #if defined(POINTERS_EXTEND_UNSIGNED)
1617 /* As we do not know which address space the pointer is referring to,
1618 we can do this only if the target does not support different pointer
1619 or address modes depending on the address space. */
1620 if (target_default_pointer_address_modes_p ()
1621 && ! POINTERS_EXTEND_UNSIGNED
1622 && mode == Pmode && GET_MODE (op) == ptr_mode
1623 && (CONSTANT_P (op)
1624 || (GET_CODE (op) == SUBREG
1625 && REG_P (SUBREG_REG (op))
1626 && REG_POINTER (SUBREG_REG (op))
1627 && GET_MODE (SUBREG_REG (op)) == Pmode))
1628 && !targetm.have_ptr_extend ())
1630 temp
1631 = convert_memory_address_addr_space_1 (Pmode, op,
1632 ADDR_SPACE_GENERIC, false,
1633 true);
1634 if (temp)
1635 return temp;
1637 #endif
1638 break;
1640 case ZERO_EXTEND:
1641 /* Check for useless extension. */
1642 if (GET_MODE (op) == mode)
1643 return op;
1645 /* Check for a zero extension of a subreg of a promoted
1646 variable, where the promotion is zero-extended, and the
1647 target mode is the same as the variable's promotion. */
1648 if (GET_CODE (op) == SUBREG
1649 && SUBREG_PROMOTED_VAR_P (op)
1650 && SUBREG_PROMOTED_UNSIGNED_P (op))
1652 rtx subreg = SUBREG_REG (op);
1653 machine_mode subreg_mode = GET_MODE (subreg);
1654 if (!paradoxical_subreg_p (mode, subreg_mode))
1656 temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1657 if (temp)
1659 /* Preserve SUBREG_PROMOTED_VAR_P. */
1660 if (partial_subreg_p (temp))
1662 SUBREG_PROMOTED_VAR_P (temp) = 1;
1663 SUBREG_PROMOTED_SET (temp, 0);
1665 return temp;
1668 else
1669 /* Zero-extending a zero-extended subreg. */
1670 return simplify_gen_unary (ZERO_EXTEND, mode,
1671 subreg, subreg_mode);
1674 /* Extending a widening multiplication should be canonicalized to
1675 a wider widening multiplication. */
1676 if (GET_CODE (op) == MULT)
1678 rtx lhs = XEXP (op, 0);
1679 rtx rhs = XEXP (op, 1);
1680 enum rtx_code lcode = GET_CODE (lhs);
1681 enum rtx_code rcode = GET_CODE (rhs);
1683 /* Widening multiplies usually extend both operands, but sometimes
1684 they use a shift to extract a portion of a register. */
1685 if ((lcode == ZERO_EXTEND
1686 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1687 && (rcode == ZERO_EXTEND
1688 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1690 machine_mode lmode = GET_MODE (lhs);
1691 machine_mode rmode = GET_MODE (rhs);
1692 int bits;
1694 if (lcode == LSHIFTRT)
1695 /* Number of bits not shifted off the end. */
1696 bits = (GET_MODE_UNIT_PRECISION (lmode)
1697 - INTVAL (XEXP (lhs, 1)));
1698 else /* lcode == ZERO_EXTEND */
1699 /* Size of inner mode. */
1700 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1702 if (rcode == LSHIFTRT)
1703 bits += (GET_MODE_UNIT_PRECISION (rmode)
1704 - INTVAL (XEXP (rhs, 1)));
1705 else /* rcode == ZERO_EXTEND */
1706 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1708 /* We can only widen multiplies if the result is mathematically
1709 equivalent. I.e. if overflow was impossible. */
1710 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1711 return simplify_gen_binary
1712 (MULT, mode,
1713 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1714 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1718 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1719 if (GET_CODE (op) == ZERO_EXTEND)
1720 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1721 GET_MODE (XEXP (op, 0)));
1723 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1724 is (zero_extend:M (subreg:O <X>)) if there is mode with
1725 GET_MODE_PRECISION (N) - I bits. */
1726 if (GET_CODE (op) == LSHIFTRT
1727 && GET_CODE (XEXP (op, 0)) == ASHIFT
1728 && is_a <scalar_int_mode> (mode, &int_mode)
1729 && CONST_INT_P (XEXP (op, 1))
1730 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1731 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1732 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1734 scalar_int_mode tmode;
1735 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1736 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1738 rtx inner =
1739 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1740 if (inner)
1741 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1742 inner, tmode);
1746 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1747 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1748 of mode N. E.g.
1749 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1750 (and:SI (reg:SI) (const_int 63)). */
1751 if (partial_subreg_p (op)
1752 && is_a <scalar_int_mode> (mode, &int_mode)
1753 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1754 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1755 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1756 && subreg_lowpart_p (op)
1757 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1758 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1760 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1761 return SUBREG_REG (op);
1762 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1763 op0_mode);
1766 #if defined(POINTERS_EXTEND_UNSIGNED)
1767 /* As we do not know which address space the pointer is referring to,
1768 we can do this only if the target does not support different pointer
1769 or address modes depending on the address space. */
1770 if (target_default_pointer_address_modes_p ()
1771 && POINTERS_EXTEND_UNSIGNED > 0
1772 && mode == Pmode && GET_MODE (op) == ptr_mode
1773 && (CONSTANT_P (op)
1774 || (GET_CODE (op) == SUBREG
1775 && REG_P (SUBREG_REG (op))
1776 && REG_POINTER (SUBREG_REG (op))
1777 && GET_MODE (SUBREG_REG (op)) == Pmode))
1778 && !targetm.have_ptr_extend ())
1780 temp
1781 = convert_memory_address_addr_space_1 (Pmode, op,
1782 ADDR_SPACE_GENERIC, false,
1783 true);
1784 if (temp)
1785 return temp;
1787 #endif
1788 break;
1790 default:
1791 break;
1794 if (VECTOR_MODE_P (mode)
1795 && vec_duplicate_p (op, &elt)
1796 && code != VEC_DUPLICATE)
1798 if (code == SIGN_EXTEND || code == ZERO_EXTEND)
1799 /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1800 operations by promoting VEC_DUPLICATE to the root of the expression
1801 (as far as possible). */
1802 temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
1803 elt, GET_MODE_INNER (GET_MODE (op)));
1804 else
1805 /* Try applying the operator to ELT and see if that simplifies.
1806 We can duplicate the result if so.
1808 The reason we traditionally haven't used simplify_gen_unary
1809 for these codes is that it didn't necessarily seem to be a
1810 win to convert things like:
1812 (neg:V (vec_duplicate:V (reg:S R)))
to:
1816 (vec_duplicate:V (neg:S (reg:S R)))
1818 The first might be done entirely in vector registers while the
1819 second might need a move between register files.
1821 However, there are also cases where promoting the vec_duplicate is
1822 more efficient, and there is definite value in having a canonical
1823 form when matching instruction patterns. We should consider
1824 extending the simplify_gen_unary code above to more cases. */
1825 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1826 elt, GET_MODE_INNER (GET_MODE (op)));
1827 if (temp)
1828 return gen_vec_duplicate (mode, temp);
1831 return 0;
1834 /* Try to compute the value of a unary operation CODE whose output mode is to
1835 be MODE with input operand OP whose mode was originally OP_MODE.
1836 Return zero if the value cannot be computed. */
1837 rtx
1838 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1839 rtx op, machine_mode op_mode)
1841 scalar_int_mode result_mode;
1843 if (code == VEC_DUPLICATE)
1845 gcc_assert (VECTOR_MODE_P (mode));
1846 if (GET_MODE (op) != VOIDmode)
1848 if (!VECTOR_MODE_P (GET_MODE (op)))
1849 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1850 else
1851 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1852 (GET_MODE (op)));
1854 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1855 return gen_const_vec_duplicate (mode, op);
1856 if (GET_CODE (op) == CONST_VECTOR
1857 && (CONST_VECTOR_DUPLICATE_P (op)
1858 || CONST_VECTOR_NUNITS (op).is_constant ()))
1860 unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1861 ? CONST_VECTOR_NPATTERNS (op)
1862 : CONST_VECTOR_NUNITS (op).to_constant ());
1863 gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1864 rtx_vector_builder builder (mode, npatterns, 1);
1865 for (unsigned i = 0; i < npatterns; i++)
1866 builder.quick_push (CONST_VECTOR_ELT (op, i));
1867 return builder.build ();
1871 if (VECTOR_MODE_P (mode)
1872 && GET_CODE (op) == CONST_VECTOR
1873 && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1875 gcc_assert (GET_MODE (op) == op_mode);
1877 rtx_vector_builder builder;
1878 if (!builder.new_unary_operation (mode, op, false))
1879 return 0;
1881 unsigned int count = builder.encoded_nelts ();
1882 for (unsigned int i = 0; i < count; i++)
1884 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1885 CONST_VECTOR_ELT (op, i),
1886 GET_MODE_INNER (op_mode));
1887 if (!x || !valid_for_const_vector_p (mode, x))
1888 return 0;
1889 builder.quick_push (x);
1891 return builder.build ();
1894 /* The order of these tests is critical so that, for example, we don't
1895 check the wrong mode (input vs. output) for a conversion operation,
1896 such as FIX. At some point, this should be simplified. */
1898 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1900 REAL_VALUE_TYPE d;
1902 if (op_mode == VOIDmode)
1904 /* CONST_INTs have VOIDmode as their mode. We assume that all
1905 the bits of the constant are significant, though this is
1906 a dangerous assumption, as CONST_INTs are often
1907 created and used with garbage in the bits outside of the
1908 precision of the implied mode of the const_int. */
1909 op_mode = MAX_MODE_INT;
1912 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1914 /* Avoid the folding if flag_signaling_nans is on and
1915 operand is a signaling NaN. */
1916 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1917 return 0;
1919 d = real_value_truncate (mode, d);
1921 /* Avoid the folding if flag_rounding_math is on and the
1922 conversion is not exact. */
1923 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1925 bool fail = false;
1926 wide_int w = real_to_integer (&d, &fail,
1927 GET_MODE_PRECISION
1928 (as_a <scalar_int_mode> (op_mode)));
1929 if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1930 return 0;
1933 return const_double_from_real_value (d, mode);
1935 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1937 REAL_VALUE_TYPE d;
1939 if (op_mode == VOIDmode)
1941 /* CONST_INTs have VOIDmode as their mode. We assume that all
1942 the bits of the constant are significant, though this is
1943 a dangerous assumption, as CONST_INTs are often
1944 created and used with garbage in the bits outside of the
1945 precision of the implied mode of the const_int. */
1946 op_mode = MAX_MODE_INT;
1949 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1951 /* Avoid the folding if flag_signaling_nans is on and
1952 operand is a signaling NaN. */
1953 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1954 return 0;
1956 d = real_value_truncate (mode, d);
1958 /* Avoid the folding if flag_rounding_math is on and the
1959 conversion is not exact. */
1960 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1962 bool fail = false;
1963 wide_int w = real_to_integer (&d, &fail,
1964 GET_MODE_PRECISION
1965 (as_a <scalar_int_mode> (op_mode)));
1966 if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1967 return 0;
1970 return const_double_from_real_value (d, mode);
1973 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1975 unsigned int width = GET_MODE_PRECISION (result_mode);
1976 if (width > MAX_BITSIZE_MODE_ANY_INT)
1977 return 0;
1979 wide_int result;
1980 scalar_int_mode imode = (op_mode == VOIDmode
1981 ? result_mode
1982 : as_a <scalar_int_mode> (op_mode));
1983 rtx_mode_t op0 = rtx_mode_t (op, imode);
1984 int int_value;
1986 #if TARGET_SUPPORTS_WIDE_INT == 0
1987 /* This assert keeps the simplification from producing a result
1988 that cannot be represented in a CONST_DOUBLE, but a lot of
1989 upstream callers expect that this function never fails to
1990 simplify something, so if you added this check to the test
1991 above, the code would die later anyway. If this assert
1992 triggers, you just need to make the port support wide int. */
1993 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1994 #endif
1996 switch (code)
1998 case NOT:
1999 result = wi::bit_not (op0);
2000 break;
2002 case NEG:
2003 result = wi::neg (op0);
2004 break;
2006 case ABS:
2007 result = wi::abs (op0);
2008 break;
2010 case FFS:
2011 result = wi::shwi (wi::ffs (op0), result_mode);
2012 break;
2014 case CLZ:
2015 if (wi::ne_p (op0, 0))
2016 int_value = wi::clz (op0);
2017 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2018 return NULL_RTX;
2019 result = wi::shwi (int_value, result_mode);
2020 break;
2022 case CLRSB:
2023 result = wi::shwi (wi::clrsb (op0), result_mode);
2024 break;
2026 case CTZ:
2027 if (wi::ne_p (op0, 0))
2028 int_value = wi::ctz (op0);
2029 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2030 return NULL_RTX;
2031 result = wi::shwi (int_value, result_mode);
2032 break;
2034 case POPCOUNT:
2035 result = wi::shwi (wi::popcount (op0), result_mode);
2036 break;
2038 case PARITY:
2039 result = wi::shwi (wi::parity (op0), result_mode);
2040 break;
2042 case BSWAP:
2043 result = wide_int (op0).bswap ();
2044 break;
2046 case TRUNCATE:
2047 case ZERO_EXTEND:
2048 result = wide_int::from (op0, width, UNSIGNED);
2049 break;
2051 case SIGN_EXTEND:
2052 result = wide_int::from (op0, width, SIGNED);
2053 break;
2055 case SS_NEG:
2056 if (wi::only_sign_bit_p (op0))
2057 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2058 else
2059 result = wi::neg (op0);
2060 break;
2062 case SS_ABS:
2063 if (wi::only_sign_bit_p (op0))
2064 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2065 else
2066 result = wi::abs (op0);
2067 break;
2069 case SQRT:
2070 default:
2071 return 0;
2074 return immed_wide_int_const (result, result_mode);
2077 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2078 && SCALAR_FLOAT_MODE_P (mode)
2079 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
2081 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
2082 switch (code)
2084 case SQRT:
2085 return 0;
2086 case ABS:
2087 d = real_value_abs (&d);
2088 break;
2089 case NEG:
2090 d = real_value_negate (&d);
2091 break;
2092 case FLOAT_TRUNCATE:
2093 /* Don't perform the operation if flag_signaling_nans is on
2094 and the operand is a signaling NaN. */
2095 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2096 return NULL_RTX;
2097 /* Or if flag_rounding_math is on and the truncation is not
2098 exact. */
2099 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2100 && !exact_real_truncate (mode, &d))
2101 return NULL_RTX;
2102 d = real_value_truncate (mode, d);
2103 break;
2104 case FLOAT_EXTEND:
2105 /* Don't perform the operation if flag_signaling_nans is on
2106 and the operand is a signaling NaN. */
2107 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2108 return NULL_RTX;
2109 /* All this does is change the mode, unless changing
2110 mode class. */
2111 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
2112 real_convert (&d, mode, &d);
2113 break;
2114 case FIX:
2115 /* Don't perform the operation if flag_signaling_nans is on
2116 and the operand is a signaling NaN. */
2117 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2118 return NULL_RTX;
2119 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
2120 break;
2121 case NOT:
2123 long tmp[4];
2124 int i;
2126 real_to_target (tmp, &d, GET_MODE (op));
2127 for (i = 0; i < 4; i++)
2128 tmp[i] = ~tmp[i];
2129 real_from_target (&d, tmp, mode);
2130 break;
2132 default:
2133 gcc_unreachable ();
2135 return const_double_from_real_value (d, mode);
2137 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2138 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2139 && is_int_mode (mode, &result_mode))
2141 unsigned int width = GET_MODE_PRECISION (result_mode);
2142 if (width > MAX_BITSIZE_MODE_ANY_INT)
2143 return 0;
2145 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2146 operators are intentionally left unspecified (to ease implementation
2147 by target backends), for consistency, this routine implements the
2148 same semantics for constant folding as used by the middle-end. */
2150 /* This was formerly used only for non-IEEE float.
2151 eggert@twinsun.com says it is safe for IEEE also. */
2152 REAL_VALUE_TYPE t;
2153 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2154 wide_int wmax, wmin;
2155 /* This is part of the ABI of real_to_integer, but we check
2156 things before making this call. */
2157 bool fail;
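/* Out-of-range and NaN inputs are folded to the saturated value the
   middle-end uses: e.g. (fix:SI (const_double:DF 1.0e10)) folds to
   (const_int 2147483647), the SImode signed maximum, while a NaN
   operand folds to (const_int 0). */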
2159 switch (code)
2161 case FIX:
2162 if (REAL_VALUE_ISNAN (*x))
2163 return const0_rtx;
2165 /* Test against the signed upper bound. */
2166 wmax = wi::max_value (width, SIGNED);
2167 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2168 if (real_less (&t, x))
2169 return immed_wide_int_const (wmax, mode);
2171 /* Test against the signed lower bound. */
2172 wmin = wi::min_value (width, SIGNED);
2173 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2174 if (real_less (x, &t))
2175 return immed_wide_int_const (wmin, mode);
2177 return immed_wide_int_const (real_to_integer (x, &fail, width),
2178 mode);
2180 case UNSIGNED_FIX:
2181 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2182 return const0_rtx;
2184 /* Test against the unsigned upper bound. */
2185 wmax = wi::max_value (width, UNSIGNED);
2186 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2187 if (real_less (&t, x))
2188 return immed_wide_int_const (wmax, mode);
2190 return immed_wide_int_const (real_to_integer (x, &fail, width),
2191 mode);
2193 default:
2194 gcc_unreachable ();
2198 /* Handle polynomial integers. */
2199 else if (CONST_POLY_INT_P (op))
2201 poly_wide_int result;
2202 switch (code)
2204 case NEG:
2205 result = -const_poly_int_value (op);
2206 break;
2208 case NOT:
2209 result = ~const_poly_int_value (op);
2210 break;
2212 default:
2213 return NULL_RTX;
2215 return immed_wide_int_const (result, mode);
2218 return NULL_RTX;
2221 /* Subroutine of simplify_binary_operation to simplify a binary operation
2222 CODE that can commute with byte swapping, with result mode MODE and
2223 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2224 Return zero if no simplification or canonicalization is possible. */
2226 rtx
2227 simplify_context::simplify_byte_swapping_operation (rtx_code code,
2228 machine_mode mode,
2229 rtx op0, rtx op1)
2231 rtx tem;
2233 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2234 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2236 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2237 simplify_gen_unary (BSWAP, mode, op1, mode));
2238 return simplify_gen_unary (BSWAP, mode, tem, mode);
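/* For instance, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), letting the AND combine
   directly with x. */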
2241 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2242 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2244 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2245 return simplify_gen_unary (BSWAP, mode, tem, mode);
2248 return NULL_RTX;
2251 /* Subroutine of simplify_binary_operation to simplify a commutative,
2252 associative binary operation CODE with result mode MODE, operating
2253 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2254 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2255 canonicalization is possible. */
2257 rtx
2258 simplify_context::simplify_associative_operation (rtx_code code,
2259 machine_mode mode,
2260 rtx op0, rtx op1)
2262 rtx tem;
2264 /* Linearize the operator to the left. */
2265 if (GET_CODE (op1) == code)
2267 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2268 if (GET_CODE (op0) == code)
2270 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2271 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2274 /* "a op (b op c)" becomes "(b op c) op a". */
2275 if (! swap_commutative_operands_p (op1, op0))
2276 return simplify_gen_binary (code, mode, op1, op0);
2278 std::swap (op0, op1);
2281 if (GET_CODE (op0) == code)
2283 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2284 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2286 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2287 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2290 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2291 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2292 if (tem != 0)
2293 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2295 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2296 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2297 if (tem != 0)
2298 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2301 return 0;
2304 /* Return a mask describing the COMPARISON. */
2305 static int
2306 comparison_to_mask (enum rtx_code comparison)
2308 switch (comparison)
2310 case LT:
2311 return 8;
2312 case GT:
2313 return 4;
2314 case EQ:
2315 return 2;
2316 case UNORDERED:
2317 return 1;
2319 case LTGT:
2320 return 12;
2321 case LE:
2322 return 10;
2323 case GE:
2324 return 6;
2325 case UNLT:
2326 return 9;
2327 case UNGT:
2328 return 5;
2329 case UNEQ:
2330 return 3;
2332 case ORDERED:
2333 return 14;
2334 case NE:
2335 return 13;
2336 case UNLE:
2337 return 11;
2338 case UNGE:
2339 return 7;
2341 default:
2342 gcc_unreachable ();
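/* The mask encodes the four mutually exclusive comparison outcomes as
   individual bits: LT = 8, GT = 4, EQ = 2, UNORDERED = 1. Every other
   code is the inclusive OR of its components, e.g. LE = LT | EQ = 10
   and NE = LT | GT | UNORDERED = 13, which is what lets
   simplify_logical_relational_operation combine two comparisons by
   ORing their masks. */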
2346 /* Return a comparison corresponding to the MASK. */
2347 static enum rtx_code
2348 mask_to_comparison (int mask)
2350 switch (mask)
2352 case 8:
2353 return LT;
2354 case 4:
2355 return GT;
2356 case 2:
2357 return EQ;
2358 case 1:
2359 return UNORDERED;
2361 case 12:
2362 return LTGT;
2363 case 10:
2364 return LE;
2365 case 6:
2366 return GE;
2367 case 9:
2368 return UNLT;
2369 case 5:
2370 return UNGT;
2371 case 3:
2372 return UNEQ;
2374 case 14:
2375 return ORDERED;
2376 case 13:
2377 return NE;
2378 case 11:
2379 return UNLE;
2380 case 7:
2381 return UNGE;
2383 default:
2384 gcc_unreachable ();
2388 /* Return true if CODE is valid for comparisons of mode MODE, false
2389 otherwise.
2391 It is always safe to return false, even if the code was valid for the
2392 given mode as that will merely suppress optimizations. */
2394 static bool
2395 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2397 switch (code)
2399 /* These are valid for integral, floating and vector modes. */
2400 case NE:
2401 case EQ:
2402 case GE:
2403 case GT:
2404 case LE:
2405 case LT:
2406 return (INTEGRAL_MODE_P (mode)
2407 || FLOAT_MODE_P (mode)
2408 || VECTOR_MODE_P (mode));
2410 /* These are valid for floating point modes. */
2411 case LTGT:
2412 case UNORDERED:
2413 case ORDERED:
2414 case UNEQ:
2415 case UNGE:
2416 case UNGT:
2417 case UNLE:
2418 case UNLT:
2419 return FLOAT_MODE_P (mode);
2421 /* These are filtered out in simplify_logical_operation, but
2422 we check for them too as a matter of safety. They are valid
2423 for integral and vector modes. */
2424 case GEU:
2425 case GTU:
2426 case LEU:
2427 case LTU:
2428 return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2430 default:
2431 gcc_unreachable ();
2435 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2436 false/true value of comparison with MODE where comparison operands
2437 have CMP_MODE. */
2439 static rtx
2440 relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2442 if (SCALAR_FLOAT_MODE_P (mode))
2444 if (res == const0_rtx)
2445 return CONST0_RTX (mode);
2446 #ifdef FLOAT_STORE_FLAG_VALUE
2447 REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2448 return const_double_from_real_value (val, mode);
2449 #else
2450 return NULL_RTX;
2451 #endif
2453 if (VECTOR_MODE_P (mode))
2455 if (res == const0_rtx)
2456 return CONST0_RTX (mode);
2457 #ifdef VECTOR_STORE_FLAG_VALUE
2458 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2459 if (val == NULL_RTX)
2460 return NULL_RTX;
2461 if (val == const1_rtx)
2462 return CONST1_RTX (mode);
2464 return gen_const_vec_duplicate (mode, val);
2465 #else
2466 return NULL_RTX;
2467 #endif
2469 /* For vector comparison with scalar int result, it is unknown
2470 if the target means here a comparison into an integral bitmask,
2471 or comparison where all comparisons true mean const_true_rtx
2472 whole result, or where any comparisons true mean const_true_rtx
2473 whole result. For const0_rtx all the cases are the same. */
2474 if (VECTOR_MODE_P (cmp_mode)
2475 && SCALAR_INT_MODE_P (mode)
2476 && res == const_true_rtx)
2477 return NULL_RTX;
2479 return res;
2482 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2483 and OP1, which should be both relational operations. Return 0 if no such
2484 simplification is possible. */
2485 rtx
2486 simplify_context::simplify_logical_relational_operation (rtx_code code,
2487 machine_mode mode,
2488 rtx op0, rtx op1)
2490 /* We only handle IOR of two relational operations. */
2491 if (code != IOR)
2492 return 0;
2494 if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2495 return 0;
2497 if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2498 && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2499 return 0;
2501 enum rtx_code code0 = GET_CODE (op0);
2502 enum rtx_code code1 = GET_CODE (op1);
2504 /* We don't handle unsigned comparisons currently. */
2505 if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2506 return 0;
2507 if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2508 return 0;
2510 int mask0 = comparison_to_mask (code0);
2511 int mask1 = comparison_to_mask (code1);
2513 int mask = mask0 | mask1;
2515 if (mask == 15)
2516 return relational_result (mode, GET_MODE (op0), const_true_rtx);
2518 code = mask_to_comparison (mask);
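/* E.g. (ior (lt x y) (eq x y)) gives mask 8 | 2 == 10, which maps
   back to LE. */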
2520 /* Many comparison codes are only valid for certain mode classes. */
2521 if (!comparison_code_valid_for_mode (code, mode))
2522 return 0;
2524 op0 = XEXP (op1, 0);
2525 op1 = XEXP (op1, 1);
2527 return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2530 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2531 and OP1. Return 0 if no simplification is possible.
2533 Don't use this for relational operations such as EQ or LT.
2534 Use simplify_relational_operation instead. */
2535 rtx
2536 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2537 rtx op0, rtx op1)
2539 rtx trueop0, trueop1;
2540 rtx tem;
2542 /* Relational operations don't work here. We must know the mode
2543 of the operands in order to do the comparison correctly.
2544 Assuming a full word can give incorrect results.
2545 Consider comparing 128 with -128 in QImode. */
2546 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2547 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2549 /* Make sure the constant is second. */
2550 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2551 && swap_commutative_operands_p (op0, op1))
2552 std::swap (op0, op1);
2554 trueop0 = avoid_constant_pool_reference (op0);
2555 trueop1 = avoid_constant_pool_reference (op1);
2557 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2558 if (tem)
2559 return tem;
2560 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2562 if (tem)
2563 return tem;
2565 /* If the above steps did not result in a simplification and op0 or op1
2566 were constant pool references, use the referenced constants directly. */
2567 if (trueop0 != op0 || trueop1 != op1)
2568 return simplify_gen_binary (code, mode, trueop0, trueop1);
2570 return NULL_RTX;
2573 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2574 which OP0 and OP1 are both vector series or vector duplicates
2575 (which are really just series with a step of 0). If so, try to
2576 form a new series by applying CODE to the bases and to the steps.
2577 Return null if no simplification is possible.
2579 MODE is the mode of the operation and is known to be a vector
2580 integer mode. */
2582 rtx
2583 simplify_context::simplify_binary_operation_series (rtx_code code,
2584 machine_mode mode,
2585 rtx op0, rtx op1)
2587 rtx base0, step0;
2588 if (vec_duplicate_p (op0, &base0))
2589 step0 = const0_rtx;
2590 else if (!vec_series_p (op0, &base0, &step0))
2591 return NULL_RTX;
2593 rtx base1, step1;
2594 if (vec_duplicate_p (op1, &base1))
2595 step1 = const0_rtx;
2596 else if (!vec_series_p (op1, &base1, &step1))
2597 return NULL_RTX;
2599 /* Only create a new series if we can simplify both parts. In other
2600 cases this isn't really a simplification, and it's not necessarily
2601 a win to replace a vector operation with a scalar operation. */
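/* E.g. (plus (vec_series 0 1) (vec_duplicate 3)) becomes
   (vec_series 3 1), since both the bases (0 + 3) and the steps (1 + 0)
   simplify to constants. */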
2602 scalar_mode inner_mode = GET_MODE_INNER (mode);
2603 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2604 if (!new_base)
2605 return NULL_RTX;
2607 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2608 if (!new_step)
2609 return NULL_RTX;
2611 return gen_vec_series (mode, new_base, new_step);
2614 /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2615 operation CODE with result mode MODE, operating on OP0 and OP1.
2616 e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2617 Returns NULL_RTX if no simplification is possible. */
2619 rtx
2620 simplify_context::simplify_distributive_operation (rtx_code code,
2621 machine_mode mode,
2622 rtx op0, rtx op1)
2624 enum rtx_code op = GET_CODE (op0);
2625 gcc_assert (GET_CODE (op1) == op);
2627 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2628 && ! side_effects_p (XEXP (op0, 1)))
2629 return simplify_gen_binary (op, mode,
2630 simplify_gen_binary (code, mode,
2631 XEXP (op0, 0),
2632 XEXP (op1, 0)),
2633 XEXP (op0, 1));
2635 if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2637 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2638 && ! side_effects_p (XEXP (op0, 0)))
2639 return simplify_gen_binary (op, mode,
2640 simplify_gen_binary (code, mode,
2641 XEXP (op0, 1),
2642 XEXP (op1, 1)),
2643 XEXP (op0, 0));
2644 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2645 && ! side_effects_p (XEXP (op0, 0)))
2646 return simplify_gen_binary (op, mode,
2647 simplify_gen_binary (code, mode,
2648 XEXP (op0, 1),
2649 XEXP (op1, 0)),
2650 XEXP (op0, 0));
2651 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2652 && ! side_effects_p (XEXP (op0, 1)))
2653 return simplify_gen_binary (op, mode,
2654 simplify_gen_binary (code, mode,
2655 XEXP (op0, 0),
2656 XEXP (op1, 1)),
2657 XEXP (op0, 1));
2660 return NULL_RTX;
2663 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2664 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2665 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2666 actual constants. */
2668 rtx
2669 simplify_context::simplify_binary_operation_1 (rtx_code code,
2670 machine_mode mode,
2671 rtx op0, rtx op1,
2672 rtx trueop0, rtx trueop1)
2674 rtx tem, reversed, opleft, opright, elt0, elt1;
2675 HOST_WIDE_INT val;
2676 scalar_int_mode int_mode, inner_mode;
2677 poly_int64 offset;
2679 /* Even if we can't compute a constant result,
2680 there are some cases worth simplifying. */
2682 switch (code)
2684 case PLUS:
2685 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2686 when x is NaN, infinite, or finite and nonzero. They aren't
2687 when x is -0 and the rounding mode is not towards -infinity,
2688 since (-0) + 0 is then 0. */
2689 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2690 return op0;
2692 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2693 transformations are safe even for IEEE. */
2694 if (GET_CODE (op0) == NEG)
2695 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2696 else if (GET_CODE (op1) == NEG)
2697 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2699 /* (~a) + 1 -> -a */
2700 if (INTEGRAL_MODE_P (mode)
2701 && GET_CODE (op0) == NOT
2702 && trueop1 == const1_rtx)
2703 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2705 /* Handle both-operands-constant cases. We can only add
2706 CONST_INTs to constants since the sum of relocatable symbols
2707 can't be handled by most assemblers. Don't add CONST_INT
2708 to CONST_INT since overflow won't be computed properly if wider
2709 than HOST_BITS_PER_WIDE_INT. */
2711 if ((GET_CODE (op0) == CONST
2712 || GET_CODE (op0) == SYMBOL_REF
2713 || GET_CODE (op0) == LABEL_REF)
2714 && poly_int_rtx_p (op1, &offset))
2715 return plus_constant (mode, op0, offset);
2716 else if ((GET_CODE (op1) == CONST
2717 || GET_CODE (op1) == SYMBOL_REF
2718 || GET_CODE (op1) == LABEL_REF)
2719 && poly_int_rtx_p (op0, &offset))
2720 return plus_constant (mode, op1, offset);
2722 /* See if this is something like X * C - X or vice versa or
2723 if the multiplication is written as a shift. If so, we can
2724 distribute and make a new multiply, shift, or maybe just
2725 have X (if C is 2 in the example above). But don't make
2726 something more expensive than we had before. */
2728 if (is_a <scalar_int_mode> (mode, &int_mode))
2730 rtx lhs = op0, rhs = op1;
2732 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2733 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2735 if (GET_CODE (lhs) == NEG)
2737 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2738 lhs = XEXP (lhs, 0);
2740 else if (GET_CODE (lhs) == MULT
2741 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2743 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2744 lhs = XEXP (lhs, 0);
2746 else if (GET_CODE (lhs) == ASHIFT
2747 && CONST_INT_P (XEXP (lhs, 1))
2748 && INTVAL (XEXP (lhs, 1)) >= 0
2749 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2751 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2752 GET_MODE_PRECISION (int_mode));
2753 lhs = XEXP (lhs, 0);
2756 if (GET_CODE (rhs) == NEG)
2758 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2759 rhs = XEXP (rhs, 0);
2761 else if (GET_CODE (rhs) == MULT
2762 && CONST_INT_P (XEXP (rhs, 1)))
2764 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2765 rhs = XEXP (rhs, 0);
2767 else if (GET_CODE (rhs) == ASHIFT
2768 && CONST_INT_P (XEXP (rhs, 1))
2769 && INTVAL (XEXP (rhs, 1)) >= 0
2770 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2772 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2773 GET_MODE_PRECISION (int_mode));
2774 rhs = XEXP (rhs, 0);
2777 if (rtx_equal_p (lhs, rhs))
2779 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2780 rtx coeff;
2781 bool speed = optimize_function_for_speed_p (cfun);
2783 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2785 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2786 return (set_src_cost (tem, int_mode, speed)
2787 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
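/* E.g. (plus (mult x 3) x) folds to (mult x 4) and (plus (ashift x 2) x)
   to (mult x 5), provided the new multiply is no more costly than the
   original expression. */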
2790 /* Optimize (X - 1) * Y + Y to X * Y. */
2791 lhs = op0;
2792 rhs = op1;
2793 if (GET_CODE (op0) == MULT)
2795 if (((GET_CODE (XEXP (op0, 0)) == PLUS
2796 && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
2797 || (GET_CODE (XEXP (op0, 0)) == MINUS
2798 && XEXP (XEXP (op0, 0), 1) == const1_rtx))
2799 && rtx_equal_p (XEXP (op0, 1), op1))
2800 lhs = XEXP (XEXP (op0, 0), 0);
2801 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2802 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
2803 || (GET_CODE (XEXP (op0, 1)) == MINUS
2804 && XEXP (XEXP (op0, 1), 1) == const1_rtx))
2805 && rtx_equal_p (XEXP (op0, 0), op1))
2806 lhs = XEXP (XEXP (op0, 1), 0);
2808 else if (GET_CODE (op1) == MULT)
2810 if (((GET_CODE (XEXP (op1, 0)) == PLUS
2811 && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
2812 || (GET_CODE (XEXP (op1, 0)) == MINUS
2813 && XEXP (XEXP (op1, 0), 1) == const1_rtx))
2814 && rtx_equal_p (XEXP (op1, 1), op0))
2815 rhs = XEXP (XEXP (op1, 0), 0);
2816 else if (((GET_CODE (XEXP (op1, 1)) == PLUS
2817 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
2818 || (GET_CODE (XEXP (op1, 1)) == MINUS
2819 && XEXP (XEXP (op1, 1), 1) == const1_rtx))
2820 && rtx_equal_p (XEXP (op1, 0), op0))
2821 rhs = XEXP (XEXP (op1, 1), 0);
2823 if (lhs != op0 || rhs != op1)
2824 return simplify_gen_binary (MULT, int_mode, lhs, rhs);
2827 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2828 if (CONST_SCALAR_INT_P (op1)
2829 && GET_CODE (op0) == XOR
2830 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2831 && mode_signbit_p (mode, op1))
2832 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2833 simplify_gen_binary (XOR, mode, op1,
2834 XEXP (op0, 1)));
2836 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2837 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2838 && GET_CODE (op0) == MULT
2839 && GET_CODE (XEXP (op0, 0)) == NEG)
2841 rtx in1, in2;
2843 in1 = XEXP (XEXP (op0, 0), 0);
2844 in2 = XEXP (op0, 1);
2845 return simplify_gen_binary (MINUS, mode, op1,
2846 simplify_gen_binary (MULT, mode,
2847 in1, in2));
2850 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2851 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2852 is 1. */
2853 if (COMPARISON_P (op0)
2854 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2855 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2856 && (reversed = reversed_comparison (op0, mode)))
2857 return
2858 simplify_gen_unary (NEG, mode, reversed, mode);
2860 /* If one of the operands is a PLUS or a MINUS, see if we can
2861 simplify this by the associative law.
2862 Don't use the associative law for floating point.
2863 The inaccuracy makes it nonassociative,
2864 and subtle programs can break if operations are associated. */
2866 if (INTEGRAL_MODE_P (mode)
2867 && (plus_minus_operand_p (op0)
2868 || plus_minus_operand_p (op1))
2869 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2870 return tem;
2872 /* Reassociate floating point addition only when the user
2873 specifies associative math operations. */
2874 if (FLOAT_MODE_P (mode)
2875 && flag_associative_math)
2877 tem = simplify_associative_operation (code, mode, op0, op1);
2878 if (tem)
2879 return tem;
2882 /* Handle vector series. */
2883 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2885 tem = simplify_binary_operation_series (code, mode, op0, op1);
2886 if (tem)
2887 return tem;
2889 break;
2891 case COMPARE:
2892 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2893 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2894 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2895 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2897 rtx xop00 = XEXP (op0, 0);
2898 rtx xop10 = XEXP (op1, 0);
2900 if (REG_P (xop00) && REG_P (xop10)
2901 && REGNO (xop00) == REGNO (xop10)
2902 && GET_MODE (xop00) == mode
2903 && GET_MODE (xop10) == mode
2904 && GET_MODE_CLASS (mode) == MODE_CC)
2905 return xop00;
2907 break;
2909 case MINUS:
2910 /* We can't assume x-x is 0 even with non-IEEE floating point,
2911 but since it is zero except in very strange circumstances, we
2912 will treat it as zero with -ffinite-math-only. */
2913 if (rtx_equal_p (trueop0, trueop1)
2914 && ! side_effects_p (op0)
2915 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2916 return CONST0_RTX (mode);
2918 /* Change subtraction from zero into negation. (0 - x) is the
2919 same as -x when x is NaN, infinite, or finite and nonzero.
2920 But if the mode has signed zeros, and does not round towards
2921 -infinity, then 0 - 0 is 0, not -0. */
2922 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2923 return simplify_gen_unary (NEG, mode, op1, mode);
2925 /* (-1 - a) is ~a, unless the expression contains symbolic
2926 constants, in which case not retaining additions and
2927 subtractions could cause invalid assembly to be produced. */
2928 if (trueop0 == constm1_rtx
2929 && !contains_symbolic_reference_p (op1))
2930 return simplify_gen_unary (NOT, mode, op1, mode);
2932 /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2933 or has signed zeros and supports rounding towards -infinity.
2934 In such a case, 0 - 0 is -0. */
2935 if (!(HONOR_SIGNED_ZEROS (mode)
2936 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2937 && !HONOR_SNANS (mode)
2938 && trueop1 == CONST0_RTX (mode))
2939 return op0;
2941 /* See if this is something like X * C - X or vice versa or
2942 if the multiplication is written as a shift. If so, we can
2943 distribute and make a new multiply, shift, or maybe just
2944 have X (if C is 2 in the example above). But don't make
2945 something more expensive than we had before. */
2947 if (is_a <scalar_int_mode> (mode, &int_mode))
2949 rtx lhs = op0, rhs = op1;
2951 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2952 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2954 if (GET_CODE (lhs) == NEG)
2956 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2957 lhs = XEXP (lhs, 0);
2959 else if (GET_CODE (lhs) == MULT
2960 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2962 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2963 lhs = XEXP (lhs, 0);
2965 else if (GET_CODE (lhs) == ASHIFT
2966 && CONST_INT_P (XEXP (lhs, 1))
2967 && INTVAL (XEXP (lhs, 1)) >= 0
2968 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2970 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2971 GET_MODE_PRECISION (int_mode));
2972 lhs = XEXP (lhs, 0);
2975 if (GET_CODE (rhs) == NEG)
2977 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2978 rhs = XEXP (rhs, 0);
2980 else if (GET_CODE (rhs) == MULT
2981 && CONST_INT_P (XEXP (rhs, 1)))
2983 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2984 rhs = XEXP (rhs, 0);
2986 else if (GET_CODE (rhs) == ASHIFT
2987 && CONST_INT_P (XEXP (rhs, 1))
2988 && INTVAL (XEXP (rhs, 1)) >= 0
2989 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2991 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2992 GET_MODE_PRECISION (int_mode));
2993 negcoeff1 = -negcoeff1;
2994 rhs = XEXP (rhs, 0);
2997 if (rtx_equal_p (lhs, rhs))
2999 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
3000 rtx coeff;
3001 bool speed = optimize_function_for_speed_p (cfun);
3003 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
3005 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
3006 return (set_src_cost (tem, int_mode, speed)
3007 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
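/* Likewise for subtraction: (minus (mult x 3) x) folds to (mult x 2)
   under the same cost check. */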
3010 /* Optimize (X + 1) * Y - Y to X * Y. */
3011 lhs = op0;
3012 if (GET_CODE (op0) == MULT)
3014 if (((GET_CODE (XEXP (op0, 0)) == PLUS
3015 && XEXP (XEXP (op0, 0), 1) == const1_rtx)
3016 || (GET_CODE (XEXP (op0, 0)) == MINUS
3017 && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
3018 && rtx_equal_p (XEXP (op0, 1), op1))
3019 lhs = XEXP (XEXP (op0, 0), 0);
3020 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
3021 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
3022 || (GET_CODE (XEXP (op0, 1)) == MINUS
3023 && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
3024 && rtx_equal_p (XEXP (op0, 0), op1))
3025 lhs = XEXP (XEXP (op0, 1), 0);
3027 if (lhs != op0)
3028 return simplify_gen_binary (MULT, int_mode, lhs, op1);
3031 /* (a - (-b)) -> (a + b). True even for IEEE. */
3032 if (GET_CODE (op1) == NEG)
3033 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3035 /* (-x - c) may be simplified as (-c - x). */
3036 if (GET_CODE (op0) == NEG
3037 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
3039 tem = simplify_unary_operation (NEG, mode, op1, mode);
3040 if (tem)
3041 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
3044 if ((GET_CODE (op0) == CONST
3045 || GET_CODE (op0) == SYMBOL_REF
3046 || GET_CODE (op0) == LABEL_REF)
3047 && poly_int_rtx_p (op1, &offset))
3048 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
3050 /* Don't let a relocatable value get a negative coeff. */
3051 if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
3052 return simplify_gen_binary (PLUS, mode,
3053 op0,
3054 neg_poly_int_rtx (mode, op1));
3056 /* (x - (x & y)) -> (x & ~y) */
3057 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
3059 if (rtx_equal_p (op0, XEXP (op1, 0)))
3061 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
3062 GET_MODE (XEXP (op1, 1)));
3063 return simplify_gen_binary (AND, mode, op0, tem);
3065 if (rtx_equal_p (op0, XEXP (op1, 1)))
3067 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
3068 GET_MODE (XEXP (op1, 0)));
3069 return simplify_gen_binary (AND, mode, op0, tem);
3073 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3074 by reversing the comparison code if valid. */
3075 if (STORE_FLAG_VALUE == 1
3076 && trueop0 == const1_rtx
3077 && COMPARISON_P (op1)
3078 && (reversed = reversed_comparison (op1, mode)))
3079 return reversed;
3081 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
3082 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3083 && GET_CODE (op1) == MULT
3084 && GET_CODE (XEXP (op1, 0)) == NEG)
3086 rtx in1, in2;
3088 in1 = XEXP (XEXP (op1, 0), 0);
3089 in2 = XEXP (op1, 1);
3090 return simplify_gen_binary (PLUS, mode,
3091 simplify_gen_binary (MULT, mode,
3092 in1, in2),
3093 op0);
3096 /* Canonicalize (minus (neg A) (mult B C)) to
3097 (minus (mult (neg B) C) A). */
3098 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3099 && GET_CODE (op1) == MULT
3100 && GET_CODE (op0) == NEG)
3102 rtx in1, in2;
3104 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
3105 in2 = XEXP (op1, 1);
3106 return simplify_gen_binary (MINUS, mode,
3107 simplify_gen_binary (MULT, mode,
3108 in1, in2),
3109 XEXP (op0, 0));
3112 /* If one of the operands is a PLUS or a MINUS, see if we can
3113 simplify this by the associative law. This will, for example,
3114 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3115 Don't use the associative law for floating point.
3116 The inaccuracy makes it nonassociative,
3117 and subtle programs can break if operations are associated. */
3119 if (INTEGRAL_MODE_P (mode)
3120 && (plus_minus_operand_p (op0)
3121 || plus_minus_operand_p (op1))
3122 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3123 return tem;
3125 /* Handle vector series. */
3126 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3128 tem = simplify_binary_operation_series (code, mode, op0, op1);
3129 if (tem)
3130 return tem;
3132 break;
3134 case MULT:
3135 if (trueop1 == constm1_rtx)
3136 return simplify_gen_unary (NEG, mode, op0, mode);
3138 if (GET_CODE (op0) == NEG)
3140 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3141 /* If op1 is a MULT as well and simplify_unary_operation
3142 just moved the NEG to the second operand, simplify_gen_binary
3143 below could, through simplify_associative_operation, move
3144 the NEG around again and recurse endlessly. */
3145 if (temp
3146 && GET_CODE (op1) == MULT
3147 && GET_CODE (temp) == MULT
3148 && XEXP (op1, 0) == XEXP (temp, 0)
3149 && GET_CODE (XEXP (temp, 1)) == NEG
3150 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3151 temp = NULL_RTX;
3152 if (temp)
3153 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3155 if (GET_CODE (op1) == NEG)
3157 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3158 /* If op0 is a MULT as well and simplify_unary_operation
3159 just moved the NEG to the second operand, simplify_gen_binary
3160 below could, through simplify_associative_operation, move
3161 the NEG around again and recurse endlessly. */
3162 if (temp
3163 && GET_CODE (op0) == MULT
3164 && GET_CODE (temp) == MULT
3165 && XEXP (op0, 0) == XEXP (temp, 0)
3166 && GET_CODE (XEXP (temp, 1)) == NEG
3167 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3168 temp = NULL_RTX;
3169 if (temp)
3170 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3173 /* Maybe simplify x * 0 to 0. The reduction is not valid if
3174 x is NaN, since x * 0 is then also NaN. Nor is it valid
3175 when the mode has signed zeros, since multiplying a negative
3176 number by 0 will give -0, not 0. */
3177 if (!HONOR_NANS (mode)
3178 && !HONOR_SIGNED_ZEROS (mode)
3179 && trueop1 == CONST0_RTX (mode)
3180 && ! side_effects_p (op0))
3181 return op1;
3183 /* In IEEE floating point, x*1 is not equivalent to x for
3184 signalling NaNs. */
3185 if (!HONOR_SNANS (mode)
3186 && trueop1 == CONST1_RTX (mode))
3187 return op0;
3189 /* Convert multiply by constant power of two into shift. */
3190 if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3192 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3193 if (val >= 0)
3194 return simplify_gen_binary (ASHIFT, mode, op0,
3195 gen_int_shift_amount (mode, val));
3198 /* x*2 is x+x and x*(-1) is -x */
3199 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3200 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3201 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3202 && GET_MODE (op0) == mode)
3204 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3206 if (real_equal (d1, &dconst2))
3207 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3209 if (!HONOR_SNANS (mode)
3210 && real_equal (d1, &dconstm1))
3211 return simplify_gen_unary (NEG, mode, op0, mode);
3214 /* Optimize -x * -x as x * x. */
3215 if (FLOAT_MODE_P (mode)
3216 && GET_CODE (op0) == NEG
3217 && GET_CODE (op1) == NEG
3218 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3219 && !side_effects_p (XEXP (op0, 0)))
3220 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3222 /* Likewise, optimize abs(x) * abs(x) as x * x. */
3223 if (SCALAR_FLOAT_MODE_P (mode)
3224 && GET_CODE (op0) == ABS
3225 && GET_CODE (op1) == ABS
3226 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3227 && !side_effects_p (XEXP (op0, 0)))
3228 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3230 /* Reassociate multiplication, but for floating point MULTs
3231 only when the user specifies unsafe math optimizations. */
3232 if (! FLOAT_MODE_P (mode)
3233 || flag_unsafe_math_optimizations)
3235 tem = simplify_associative_operation (code, mode, op0, op1);
3236 if (tem)
3237 return tem;
3239 break;
3241 case IOR:
3242 if (trueop1 == CONST0_RTX (mode))
3243 return op0;
3244 if (INTEGRAL_MODE_P (mode)
3245 && trueop1 == CONSTM1_RTX (mode)
3246 && !side_effects_p (op0))
3247 return op1;
3248 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3249 return op0;
3250 /* A | (~A) -> -1 */
3251 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3252 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3253 && ! side_effects_p (op0)
3254 && SCALAR_INT_MODE_P (mode))
3255 return constm1_rtx;
3257 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3258 if (CONST_INT_P (op1)
3259 && HWI_COMPUTABLE_MODE_P (mode)
3260 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3261 && !side_effects_p (op0))
3262 return op1;
3264 /* Canonicalize (X & C1) | C2. */
3265 if (GET_CODE (op0) == AND
3266 && CONST_INT_P (trueop1)
3267 && CONST_INT_P (XEXP (op0, 1)))
3269 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3270 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3271 HOST_WIDE_INT c2 = INTVAL (trueop1);
3273 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3274 if ((c1 & c2) == c1
3275 && !side_effects_p (XEXP (op0, 0)))
3276 return trueop1;
3278 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3279 if (((c1|c2) & mask) == mask)
3280 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3283 /* Convert (A & B) | A to A. */
3284 if (GET_CODE (op0) == AND
3285 && (rtx_equal_p (XEXP (op0, 0), op1)
3286 || rtx_equal_p (XEXP (op0, 1), op1))
3287 && ! side_effects_p (XEXP (op0, 0))
3288 && ! side_effects_p (XEXP (op0, 1)))
3289 return op1;
3291 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3292 mode size to (rotate A CX). */
3294 if (GET_CODE (op1) == ASHIFT
3295 || GET_CODE (op1) == SUBREG)
3297 opleft = op1;
3298 opright = op0;
3300 else
3302 opright = op1;
3303 opleft = op0;
3306 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3307 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3308 && CONST_INT_P (XEXP (opleft, 1))
3309 && CONST_INT_P (XEXP (opright, 1))
3310 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3311 == GET_MODE_UNIT_PRECISION (mode)))
3312 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
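/* E.g. (ior (ashift:SI x (const_int 8)) (lshiftrt:SI x (const_int 24)))
   becomes (rotate:SI x (const_int 8)), since 8 + 24 equals the SImode
   precision of 32. */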
3314 /* Same, but for ashift that has been "simplified" to a wider mode
3315 by simplify_shift_const. */
3317 if (GET_CODE (opleft) == SUBREG
3318 && is_a <scalar_int_mode> (mode, &int_mode)
3319 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3320 &inner_mode)
3321 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3322 && GET_CODE (opright) == LSHIFTRT
3323 && GET_CODE (XEXP (opright, 0)) == SUBREG
3324 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3325 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3326 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3327 SUBREG_REG (XEXP (opright, 0)))
3328 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3329 && CONST_INT_P (XEXP (opright, 1))
3330 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3331 + INTVAL (XEXP (opright, 1))
3332 == GET_MODE_PRECISION (int_mode)))
3333 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3334 XEXP (SUBREG_REG (opleft), 1));
3336 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3337 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
3338 the PLUS does not affect any of the bits in OP1: then we can do
3339 the IOR as a PLUS and we can associate. This is valid if OP1
3340 can be safely shifted left C bits. */
3341 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3342 && GET_CODE (XEXP (op0, 0)) == PLUS
3343 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3344 && CONST_INT_P (XEXP (op0, 1))
3345 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3347 int count = INTVAL (XEXP (op0, 1));
3348 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3350 if (mask >> count == INTVAL (trueop1)
3351 && trunc_int_for_mode (mask, mode) == mask
3352 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3353 return simplify_gen_binary (ASHIFTRT, mode,
3354 plus_constant (mode, XEXP (op0, 0),
3355 mask),
3356 XEXP (op0, 1));
3359 /* The following happens with bitfield merging.
3360 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3361 if (GET_CODE (op0) == AND
3362 && GET_CODE (op1) == AND
3363 && CONST_INT_P (XEXP (op0, 1))
3364 && CONST_INT_P (XEXP (op1, 1))
3365 && (INTVAL (XEXP (op0, 1))
3366 == ~INTVAL (XEXP (op1, 1))))
3368 /* The IOR may be on both sides. */
3369 rtx top0 = NULL_RTX, top1 = NULL_RTX;
3370 if (GET_CODE (XEXP (op1, 0)) == IOR)
3371 top0 = op0, top1 = op1;
3372 else if (GET_CODE (XEXP (op0, 0)) == IOR)
3373 top0 = op1, top1 = op0;
3374 if (top0 && top1)
3376 /* X may be on either side of the inner IOR. */
3377 rtx tem = NULL_RTX;
3378 if (rtx_equal_p (XEXP (top0, 0),
3379 XEXP (XEXP (top1, 0), 0)))
3380 tem = XEXP (XEXP (top1, 0), 1);
3381 else if (rtx_equal_p (XEXP (top0, 0),
3382 XEXP (XEXP (top1, 0), 1)))
3383 tem = XEXP (XEXP (top1, 0), 0);
3384 if (tem)
3385 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3386 simplify_gen_binary
3387 (AND, mode, tem, XEXP (top1, 1)));
3391 /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3392 if (GET_CODE (op0) == GET_CODE (op1)
3393 && (GET_CODE (op0) == AND
3394 || GET_CODE (op0) == IOR
3395 || GET_CODE (op0) == LSHIFTRT
3396 || GET_CODE (op0) == ASHIFTRT
3397 || GET_CODE (op0) == ASHIFT
3398 || GET_CODE (op0) == ROTATE
3399 || GET_CODE (op0) == ROTATERT))
3401 tem = simplify_distributive_operation (code, mode, op0, op1);
3402 if (tem)
3403 return tem;
3406 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3407 if (tem)
3408 return tem;
3410 tem = simplify_associative_operation (code, mode, op0, op1);
3411 if (tem)
3412 return tem;
3414 tem = simplify_logical_relational_operation (code, mode, op0, op1);
3415 if (tem)
3416 return tem;
3417 break;
3419 case XOR:
3420 if (trueop1 == CONST0_RTX (mode))
3421 return op0;
3422 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3423 return simplify_gen_unary (NOT, mode, op0, mode);
3424 if (rtx_equal_p (trueop0, trueop1)
3425 && ! side_effects_p (op0)
3426 && GET_MODE_CLASS (mode) != MODE_CC)
3427 return CONST0_RTX (mode);
3429 /* Canonicalize XOR of the most significant bit to PLUS. */
3430 if (CONST_SCALAR_INT_P (op1)
3431 && mode_signbit_p (mode, op1))
3432 return simplify_gen_binary (PLUS, mode, op0, op1);
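/* The canonicalization above is safe because, for the most significant
   bit, XOR and PLUS coincide: any carry out of the top bit is simply
   discarded in modular arithmetic. */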
3433 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3434 if (CONST_SCALAR_INT_P (op1)
3435 && GET_CODE (op0) == PLUS
3436 && CONST_SCALAR_INT_P (XEXP (op0, 1))
3437 && mode_signbit_p (mode, XEXP (op0, 1)))
3438 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3439 simplify_gen_binary (XOR, mode, op1,
3440 XEXP (op0, 1)));
3442 /* If we are XORing two things that have no bits in common,
3443 convert them into an IOR. This helps to detect rotation encoded
3444 using those methods and possibly other simplifications. */
3446 if (HWI_COMPUTABLE_MODE_P (mode)
3447 && (nonzero_bits (op0, mode)
3448 & nonzero_bits (op1, mode)) == 0)
3449 return (simplify_gen_binary (IOR, mode, op0, op1));
3451 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3452 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3453 (NOT y). */
3455 int num_negated = 0;
3457 if (GET_CODE (op0) == NOT)
3458 num_negated++, op0 = XEXP (op0, 0);
3459 if (GET_CODE (op1) == NOT)
3460 num_negated++, op1 = XEXP (op1, 0);
3462 if (num_negated == 2)
3463 return simplify_gen_binary (XOR, mode, op0, op1);
3464 else if (num_negated == 1)
3465 return simplify_gen_unary (NOT, mode,
3466 simplify_gen_binary (XOR, mode, op0, op1),
3467 mode);
3470 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3471 correspond to a machine insn or result in further simplifications
3472 if B is a constant. */
3474 if (GET_CODE (op0) == AND
3475 && rtx_equal_p (XEXP (op0, 1), op1)
3476 && ! side_effects_p (op1))
3477 return simplify_gen_binary (AND, mode,
3478 simplify_gen_unary (NOT, mode,
3479 XEXP (op0, 0), mode),
3480 op1);
3482 else if (GET_CODE (op0) == AND
3483 && rtx_equal_p (XEXP (op0, 0), op1)
3484 && ! side_effects_p (op1))
3485 return simplify_gen_binary (AND, mode,
3486 simplify_gen_unary (NOT, mode,
3487 XEXP (op0, 1), mode),
3488 op1);
3490 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3491 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3492 out bits inverted twice and not set by C. Similarly, given
3493 (xor (and (xor A B) C) D), simplify without inverting C in
3494 the xor operand: (xor (and A C) (B&C)^D).
3495 */
3496 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3497 && GET_CODE (XEXP (op0, 0)) == XOR
3498 && CONST_INT_P (op1)
3499 && CONST_INT_P (XEXP (op0, 1))
3500 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3502 enum rtx_code op = GET_CODE (op0);
3503 rtx a = XEXP (XEXP (op0, 0), 0);
3504 rtx b = XEXP (XEXP (op0, 0), 1);
3505 rtx c = XEXP (op0, 1);
3506 rtx d = op1;
3507 HOST_WIDE_INT bval = INTVAL (b);
3508 HOST_WIDE_INT cval = INTVAL (c);
3509 HOST_WIDE_INT dval = INTVAL (d);
3510 HOST_WIDE_INT xcval;
3512 if (op == IOR)
3513 xcval = ~cval;
3514 else
3515 xcval = cval;
3517 return simplify_gen_binary (XOR, mode,
3518 simplify_gen_binary (op, mode, a, c),
3519 gen_int_mode ((bval & xcval) ^ dval,
3520 mode));
3523 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3524 we can transform like this:
3525 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3526 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3527 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3528 Attempt a few simplifications when B and C are both constants. */
3529 if (GET_CODE (op0) == AND
3530 && CONST_INT_P (op1)
3531 && CONST_INT_P (XEXP (op0, 1)))
3533 rtx a = XEXP (op0, 0);
3534 rtx b = XEXP (op0, 1);
3535 rtx c = op1;
3536 HOST_WIDE_INT bval = INTVAL (b);
3537 HOST_WIDE_INT cval = INTVAL (c);
3539 /* Instead of computing ~A&C, we compute its negation,
3540 A|~C. If that yields -1, ~A&C is zero, so we can
3541 optimize for sure. If it does not simplify, we still try
3542 to compute ~A&C below, but since that always allocates
3543 RTL, we don't try that before committing to returning a
3544 simplified expression. */
3545 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3546 GEN_INT (~cval));
3548 if ((~cval & bval) == 0)
3550 rtx na_c = NULL_RTX;
3551 if (n_na_c)
3552 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3553 else
3555 /* If ~A does not simplify, don't bother: we don't
3556 want to simplify 2 operations into 3, and if na_c
3557 were to simplify with na, n_na_c would have
3558 simplified as well. */
3559 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3560 if (na)
3561 na_c = simplify_gen_binary (AND, mode, na, c);
3564 /* Try to simplify ~A&C | ~B&C. */
3565 if (na_c != NULL_RTX)
3566 return simplify_gen_binary (IOR, mode, na_c,
3567 gen_int_mode (~bval & cval, mode));
3569 else
3571 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3572 if (n_na_c == CONSTM1_RTX (mode))
3574 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3575 gen_int_mode (~cval & bval,
3576 mode));
3577 return simplify_gen_binary (IOR, mode, a_nc_b,
3578 gen_int_mode (~bval & cval,
3579 mode));
3584 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3585 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3586 machines, and also has shorter instruction path length. */
3587 if (GET_CODE (op0) == AND
3588 && GET_CODE (XEXP (op0, 0)) == XOR
3589 && CONST_INT_P (XEXP (op0, 1))
3590 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3592 rtx a = trueop1;
3593 rtx b = XEXP (XEXP (op0, 0), 1);
3594 rtx c = XEXP (op0, 1);
3595 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3596 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3597 rtx bc = simplify_gen_binary (AND, mode, b, c);
3598 return simplify_gen_binary (IOR, mode, a_nc, bc);
3600 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3601 else if (GET_CODE (op0) == AND
3602 && GET_CODE (XEXP (op0, 0)) == XOR
3603 && CONST_INT_P (XEXP (op0, 1))
3604 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3606 rtx a = XEXP (XEXP (op0, 0), 0);
3607 rtx b = trueop1;
3608 rtx c = XEXP (op0, 1);
3609 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3610 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3611 rtx ac = simplify_gen_binary (AND, mode, a, c);
3612 return simplify_gen_binary (IOR, mode, ac, b_nc);
3615 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3616 comparison if STORE_FLAG_VALUE is 1. */
3617 if (STORE_FLAG_VALUE == 1
3618 && trueop1 == const1_rtx
3619 && COMPARISON_P (op0)
3620 && (reversed = reversed_comparison (op0, mode)))
3621 return reversed;
3623 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3624 is (lt foo (const_int 0)), so we can perform the above
3625 simplification if STORE_FLAG_VALUE is 1. */
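/* For example, for a 32-bit mode this turns
(xor (lshiftrt x (const_int 31)) (const_int 1))
into (ge x (const_int 0)). */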
3627 if (is_a <scalar_int_mode> (mode, &int_mode)
3628 && STORE_FLAG_VALUE == 1
3629 && trueop1 == const1_rtx
3630 && GET_CODE (op0) == LSHIFTRT
3631 && CONST_INT_P (XEXP (op0, 1))
3632 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3633 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3635 /* (xor (comparison foo bar) (const_int sign-bit))
3636 when STORE_FLAG_VALUE is the sign bit. */
3637 if (is_a <scalar_int_mode> (mode, &int_mode)
3638 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3639 && trueop1 == const_true_rtx
3640 && COMPARISON_P (op0)
3641 && (reversed = reversed_comparison (op0, int_mode)))
3642 return reversed;
3644 /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
3645 if (GET_CODE (op0) == GET_CODE (op1)
3646 && (GET_CODE (op0) == AND
3647 || GET_CODE (op0) == LSHIFTRT
3648 || GET_CODE (op0) == ASHIFTRT
3649 || GET_CODE (op0) == ASHIFT
3650 || GET_CODE (op0) == ROTATE
3651 || GET_CODE (op0) == ROTATERT))
3653 tem = simplify_distributive_operation (code, mode, op0, op1);
3654 if (tem)
3655 return tem;
3658 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3659 if (tem)
3660 return tem;
3662 tem = simplify_associative_operation (code, mode, op0, op1);
3663 if (tem)
3664 return tem;
3665 break;
3667 case AND:
3668 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3669 return trueop1;
3670 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3671 return op0;
3672 if (HWI_COMPUTABLE_MODE_P (mode))
3674 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3675 HOST_WIDE_INT nzop1;
3676 if (CONST_INT_P (trueop1))
3678 HOST_WIDE_INT val1 = INTVAL (trueop1);
3679 /* If we are turning off bits already known off in OP0, we need
3680 not do an AND. */
3681 if ((nzop0 & ~val1) == 0)
3682 return op0;
3684 nzop1 = nonzero_bits (trueop1, mode);
3685 /* If we are clearing all the nonzero bits, the result is zero. */
3686 if ((nzop1 & nzop0) == 0
3687 && !side_effects_p (op0) && !side_effects_p (op1))
3688 return CONST0_RTX (mode);
3690 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3691 && GET_MODE_CLASS (mode) != MODE_CC)
3692 return op0;
3693 /* A & (~A) -> 0 */
3694 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3695 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3696 && ! side_effects_p (op0)
3697 && GET_MODE_CLASS (mode) != MODE_CC)
3698 return CONST0_RTX (mode);
3700 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3701 there are no nonzero bits of C outside of X's mode. */
3702 if ((GET_CODE (op0) == SIGN_EXTEND
3703 || GET_CODE (op0) == ZERO_EXTEND)
3704 && CONST_INT_P (trueop1)
3705 && HWI_COMPUTABLE_MODE_P (mode)
3706 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3707 & UINTVAL (trueop1)) == 0)
3709 machine_mode imode = GET_MODE (XEXP (op0, 0));
3710 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3711 gen_int_mode (INTVAL (trueop1),
3712 imode));
3713 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3716 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3717 we might be able to further simplify the AND with X and potentially
3718 remove the truncation altogether. */
3719 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3721 rtx x = XEXP (op0, 0);
3722 machine_mode xmode = GET_MODE (x);
3723 tem = simplify_gen_binary (AND, xmode, x,
3724 gen_int_mode (INTVAL (trueop1), xmode));
3725 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3728 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3729 if (GET_CODE (op0) == IOR
3730 && CONST_INT_P (trueop1)
3731 && CONST_INT_P (XEXP (op0, 1)))
3733 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3734 return simplify_gen_binary (IOR, mode,
3735 simplify_gen_binary (AND, mode,
3736 XEXP (op0, 0), op1),
3737 gen_int_mode (tmp, mode));
3740 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3741 insn (and may simplify more). */
3742 if (GET_CODE (op0) == XOR
3743 && rtx_equal_p (XEXP (op0, 0), op1)
3744 && ! side_effects_p (op1))
3745 return simplify_gen_binary (AND, mode,
3746 simplify_gen_unary (NOT, mode,
3747 XEXP (op0, 1), mode),
3748 op1);
3750 if (GET_CODE (op0) == XOR
3751 && rtx_equal_p (XEXP (op0, 1), op1)
3752 && ! side_effects_p (op1))
3753 return simplify_gen_binary (AND, mode,
3754 simplify_gen_unary (NOT, mode,
3755 XEXP (op0, 0), mode),
3756 op1);
3758 /* Similarly for (~(A ^ B)) & A. */
3759 if (GET_CODE (op0) == NOT
3760 && GET_CODE (XEXP (op0, 0)) == XOR
3761 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3762 && ! side_effects_p (op1))
3763 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3765 if (GET_CODE (op0) == NOT
3766 && GET_CODE (XEXP (op0, 0)) == XOR
3767 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3768 && ! side_effects_p (op1))
3769 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3771 /* Convert (A | B) & A to A. */
3772 if (GET_CODE (op0) == IOR
3773 && (rtx_equal_p (XEXP (op0, 0), op1)
3774 || rtx_equal_p (XEXP (op0, 1), op1))
3775 && ! side_effects_p (XEXP (op0, 0))
3776 && ! side_effects_p (XEXP (op0, 1)))
3777 return op1;
3779 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3780 ((A & N) + B) & M -> (A + B) & M
3781 Similarly if (N & M) == 0,
3782 ((A | N) + B) & M -> (A + B) & M
3783 and for - instead of + and/or ^ instead of |.
3784 Also, if (N & M) == 0, then
3785 (A +- N) & M -> A & M. */
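/* For instance, with M == 7: ((A & 0xff) + B) & 7 -> (A + B) & 7,
((A | 8) + B) & 7 -> (A + B) & 7, and (A + 8) & 7 -> A & 7. */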
3786 if (CONST_INT_P (trueop1)
3787 && HWI_COMPUTABLE_MODE_P (mode)
3788 && ~UINTVAL (trueop1)
3789 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3790 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3792 rtx pmop[2];
3793 int which;
3795 pmop[0] = XEXP (op0, 0);
3796 pmop[1] = XEXP (op0, 1);
3798 if (CONST_INT_P (pmop[1])
3799 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3800 return simplify_gen_binary (AND, mode, pmop[0], op1);
3802 for (which = 0; which < 2; which++)
3804 tem = pmop[which];
3805 switch (GET_CODE (tem))
3807 case AND:
3808 if (CONST_INT_P (XEXP (tem, 1))
3809 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3810 == UINTVAL (trueop1))
3811 pmop[which] = XEXP (tem, 0);
3812 break;
3813 case IOR:
3814 case XOR:
3815 if (CONST_INT_P (XEXP (tem, 1))
3816 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3817 pmop[which] = XEXP (tem, 0);
3818 break;
3819 default:
3820 break;
3824 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3826 tem = simplify_gen_binary (GET_CODE (op0), mode,
3827 pmop[0], pmop[1]);
3828 return simplify_gen_binary (code, mode, tem, op1);
3832 /* (and X (ior (not X) Y)) -> (and X Y) */
3833 if (GET_CODE (op1) == IOR
3834 && GET_CODE (XEXP (op1, 0)) == NOT
3835 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3836 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3838 /* (and (ior (not X) Y) X) -> (and X Y) */
3839 if (GET_CODE (op0) == IOR
3840 && GET_CODE (XEXP (op0, 0)) == NOT
3841 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3842 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3844 /* (and X (ior Y (not X))) -> (and X Y) */
3845 if (GET_CODE (op1) == IOR
3846 && GET_CODE (XEXP (op1, 1)) == NOT
3847 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3848 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3850 /* (and (ior Y (not X)) X) -> (and X Y) */
3851 if (GET_CODE (op0) == IOR
3852 && GET_CODE (XEXP (op0, 1)) == NOT
3853 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3854 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3856 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3857 if (GET_CODE (op0) == GET_CODE (op1)
3858 && (GET_CODE (op0) == AND
3859 || GET_CODE (op0) == IOR
3860 || GET_CODE (op0) == LSHIFTRT
3861 || GET_CODE (op0) == ASHIFTRT
3862 || GET_CODE (op0) == ASHIFT
3863 || GET_CODE (op0) == ROTATE
3864 || GET_CODE (op0) == ROTATERT))
3866 tem = simplify_distributive_operation (code, mode, op0, op1);
3867 if (tem)
3868 return tem;
3871 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3872 if (tem)
3873 return tem;
3875 tem = simplify_associative_operation (code, mode, op0, op1);
3876 if (tem)
3877 return tem;
3878 break;
3880 case UDIV:
3881 /* 0/x is 0 (or x&0 if x has side-effects). */
3882 if (trueop0 == CONST0_RTX (mode)
3883 && !cfun->can_throw_non_call_exceptions)
3885 if (side_effects_p (op1))
3886 return simplify_gen_binary (AND, mode, op1, trueop0);
3887 return trueop0;
3889 /* x/1 is x. */
3890 if (trueop1 == CONST1_RTX (mode))
3892 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3893 if (tem)
3894 return tem;
3896 /* Convert divide by power of two into shift. */
3897 if (CONST_INT_P (trueop1)
3898 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3899 return simplify_gen_binary (LSHIFTRT, mode, op0,
3900 gen_int_shift_amount (mode, val));
3901 break;
3903 case DIV:
3904 /* Handle floating point and integers separately. */
3905 if (SCALAR_FLOAT_MODE_P (mode))
3907 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3908 safe for modes with NaNs, since 0.0 / 0.0 will then be
3909 NaN rather than 0.0. Nor is it safe for modes with signed
3910 zeros, since dividing 0 by a negative number gives -0.0. */
3911 if (trueop0 == CONST0_RTX (mode)
3912 && !HONOR_NANS (mode)
3913 && !HONOR_SIGNED_ZEROS (mode)
3914 && ! side_effects_p (op1))
3915 return op0;
3916 /* x/1.0 is x. */
3917 if (trueop1 == CONST1_RTX (mode)
3918 && !HONOR_SNANS (mode))
3919 return op0;
3921 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3922 && trueop1 != CONST0_RTX (mode))
3924 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3926 /* x/-1.0 is -x. */
3927 if (real_equal (d1, &dconstm1)
3928 && !HONOR_SNANS (mode))
3929 return simplify_gen_unary (NEG, mode, op0, mode);
3931 /* Change FP division by a constant into multiplication.
3932 Only do this with -freciprocal-math. */
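/* For example, x/4.0 becomes x*0.25; the reciprocal of 4.0 is exactly
representable, though in general the computed reciprocal may be rounded. */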
3933 if (flag_reciprocal_math
3934 && !real_equal (d1, &dconst0))
3936 REAL_VALUE_TYPE d;
3937 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3938 tem = const_double_from_real_value (d, mode);
3939 return simplify_gen_binary (MULT, mode, op0, tem);
3943 else if (SCALAR_INT_MODE_P (mode))
3945 /* 0/x is 0 (or x&0 if x has side-effects). */
3946 if (trueop0 == CONST0_RTX (mode)
3947 && !cfun->can_throw_non_call_exceptions)
3949 if (side_effects_p (op1))
3950 return simplify_gen_binary (AND, mode, op1, trueop0);
3951 return trueop0;
3953 /* x/1 is x. */
3954 if (trueop1 == CONST1_RTX (mode))
3956 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3957 if (tem)
3958 return tem;
3960 /* x/-1 is -x. */
3961 if (trueop1 == constm1_rtx)
3963 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3964 if (x)
3965 return simplify_gen_unary (NEG, mode, x, mode);
3968 break;
3970 case UMOD:
3971 /* 0%x is 0 (or x&0 if x has side-effects). */
3972 if (trueop0 == CONST0_RTX (mode))
3974 if (side_effects_p (op1))
3975 return simplify_gen_binary (AND, mode, op1, trueop0);
3976 return trueop0;
3978 /* x%1 is 0 (or x&0 if x has side-effects). */
3979 if (trueop1 == CONST1_RTX (mode))
3981 if (side_effects_p (op0))
3982 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3983 return CONST0_RTX (mode);
3985 /* Implement modulus by power of two as AND. */
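/* For example, (umod x (const_int 8)) becomes (and x (const_int 7)). */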
3986 if (CONST_INT_P (trueop1)
3987 && exact_log2 (UINTVAL (trueop1)) > 0)
3988 return simplify_gen_binary (AND, mode, op0,
3989 gen_int_mode (UINTVAL (trueop1) - 1,
3990 mode));
3991 break;
3993 case MOD:
3994 /* 0%x is 0 (or x&0 if x has side-effects). */
3995 if (trueop0 == CONST0_RTX (mode))
3997 if (side_effects_p (op1))
3998 return simplify_gen_binary (AND, mode, op1, trueop0);
3999 return trueop0;
4001 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
4002 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
4004 if (side_effects_p (op0))
4005 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4006 return CONST0_RTX (mode);
4008 break;
4010 case ROTATERT:
4011 case ROTATE:
4012 if (trueop1 == CONST0_RTX (mode))
4013 return op0;
4014 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
4015 prefer left rotation; if op1 is from bitsize / 2 + 1 to
4016 bitsize - 1, use the other direction of rotate with a
4017 1 .. bitsize / 2 - 1 amount instead. */
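/* For example, for a 32-bit mode, (rotate x (const_int 24)) becomes
(rotatert x (const_int 8)), and (rotatert x (const_int 16)) becomes
(rotate x (const_int 16)). */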
4018 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
4019 if (CONST_INT_P (trueop1)
4020 && IN_RANGE (INTVAL (trueop1),
4021 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
4022 GET_MODE_UNIT_PRECISION (mode) - 1))
4024 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
4025 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
4026 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
4027 mode, op0, new_amount_rtx);
4029 #endif
4030 /* FALLTHRU */
4031 case ASHIFTRT:
4032 if (trueop1 == CONST0_RTX (mode))
4033 return op0;
4034 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4035 return op0;
4036 /* Rotating ~0 always results in ~0. */
4037 if (CONST_INT_P (trueop0)
4038 && HWI_COMPUTABLE_MODE_P (mode)
4039 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4040 && ! side_effects_p (op1))
4041 return op0;
4043 canonicalize_shift:
4044 /* Given:
4045 scalar modes M1, M2
4046 scalar constants c1, c2
4047 size (M2) > size (M1)
4048 c1 == size (M2) - size (M1)
4049 optimize:
4050 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4051 <low_part>)
4052 (const_int <c2>))
4054 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4055 <low_part>). */
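/* For instance, with M2 == DImode and M1 == SImode, so c1 == 32:
(lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) <low_part>)
(const_int 3))
becomes
(subreg:SI (lshiftrt:DI (reg:DI R) (const_int 35)) <low_part>). */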
4056 if ((code == ASHIFTRT || code == LSHIFTRT)
4057 && is_a <scalar_int_mode> (mode, &int_mode)
4058 && SUBREG_P (op0)
4059 && CONST_INT_P (op1)
4060 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
4061 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
4062 &inner_mode)
4063 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
4064 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
4065 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
4066 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
4067 && subreg_lowpart_p (op0))
4069 rtx tmp = gen_int_shift_amount
4070 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
4072 /* Combine would usually zero out the value when combining two
4073 local shifts and the range becomes larger than or equal to the mode.
4074 However, since we fold away one of the shifts here, combine won't
4075 see it, so we should immediately zero the result if it's out of
4076 range. */
4077 if (code == LSHIFTRT
4078 && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4079 tmp = const0_rtx;
4080 else
4081 tmp = simplify_gen_binary (code,
4082 inner_mode,
4083 XEXP (SUBREG_REG (op0), 0),
4084 tmp);
4086 return lowpart_subreg (int_mode, tmp, inner_mode);
4089 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4091 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4092 if (val != INTVAL (op1))
4093 return simplify_gen_binary (code, mode, op0,
4094 gen_int_shift_amount (mode, val));
4096 break;
4098 case SS_ASHIFT:
4099 if (CONST_INT_P (trueop0)
4100 && HWI_COMPUTABLE_MODE_P (mode)
4101 && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
4102 || mode_signbit_p (mode, trueop0))
4103 && ! side_effects_p (op1))
4104 return op0;
4105 goto simplify_ashift;
4107 case US_ASHIFT:
4108 if (CONST_INT_P (trueop0)
4109 && HWI_COMPUTABLE_MODE_P (mode)
4110 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4111 && ! side_effects_p (op1))
4112 return op0;
4113 /* FALLTHRU */
4115 case ASHIFT:
4116 simplify_ashift:
4117 if (trueop1 == CONST0_RTX (mode))
4118 return op0;
4119 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4120 return op0;
4121 if (mem_depth
4122 && code == ASHIFT
4123 && CONST_INT_P (trueop1)
4124 && is_a <scalar_int_mode> (mode, &int_mode)
4125 && IN_RANGE (UINTVAL (trueop1),
4126 1, GET_MODE_PRECISION (int_mode) - 1))
4128 auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4129 << UINTVAL (trueop1));
4130 rtx new_op1 = immed_wide_int_const (c, int_mode);
4131 return simplify_gen_binary (MULT, int_mode, op0, new_op1);
4133 goto canonicalize_shift;
4135 case LSHIFTRT:
4136 if (trueop1 == CONST0_RTX (mode))
4137 return op0;
4138 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4139 return op0;
4140 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
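/* For instance, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32
for SImode, (lshiftrt (clz:SI x) (const_int 5)) is 1 exactly when
x == 0, so it becomes (eq x (const_int 0)). */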
4141 if (GET_CODE (op0) == CLZ
4142 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4143 && CONST_INT_P (trueop1)
4144 && STORE_FLAG_VALUE == 1
4145 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4147 unsigned HOST_WIDE_INT zero_val = 0;
4149 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4150 && zero_val == GET_MODE_PRECISION (inner_mode)
4151 && INTVAL (trueop1) == exact_log2 (zero_val))
4152 return simplify_gen_relational (EQ, mode, inner_mode,
4153 XEXP (op0, 0), const0_rtx);
4155 goto canonicalize_shift;
4157 case SMIN:
4158 if (HWI_COMPUTABLE_MODE_P (mode)
4159 && mode_signbit_p (mode, trueop1)
4160 && ! side_effects_p (op0))
4161 return op1;
4162 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4163 return op0;
4164 tem = simplify_associative_operation (code, mode, op0, op1);
4165 if (tem)
4166 return tem;
4167 break;
4169 case SMAX:
4170 if (HWI_COMPUTABLE_MODE_P (mode)
4171 && CONST_INT_P (trueop1)
4172 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4173 && ! side_effects_p (op0))
4174 return op1;
4175 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4176 return op0;
4177 tem = simplify_associative_operation (code, mode, op0, op1);
4178 if (tem)
4179 return tem;
4180 break;
4182 case UMIN:
4183 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4184 return op1;
4185 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4186 return op0;
4187 tem = simplify_associative_operation (code, mode, op0, op1);
4188 if (tem)
4189 return tem;
4190 break;
4192 case UMAX:
4193 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4194 return op1;
4195 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4196 return op0;
4197 tem = simplify_associative_operation (code, mode, op0, op1);
4198 if (tem)
4199 return tem;
4200 break;
4202 case SS_PLUS:
4203 case US_PLUS:
4204 case SS_MINUS:
4205 case US_MINUS:
4206 /* Simplify x +/- 0 to x, if possible. */
4207 if (trueop1 == CONST0_RTX (mode))
4208 return op0;
4209 return 0;
4211 case SS_MULT:
4212 case US_MULT:
4213 /* Simplify x * 0 to 0, if possible. */
4214 if (trueop1 == CONST0_RTX (mode)
4215 && !side_effects_p (op0))
4216 return op1;
4218 /* Simplify x * 1 to x, if possible. */
4219 if (trueop1 == CONST1_RTX (mode))
4220 return op0;
4221 return 0;
4223 case SMUL_HIGHPART:
4224 case UMUL_HIGHPART:
4225 /* Simplify x * 0 to 0, if possible. */
4226 if (trueop1 == CONST0_RTX (mode)
4227 && !side_effects_p (op0))
4228 return op1;
4229 return 0;
4231 case SS_DIV:
4232 case US_DIV:
4233 /* Simplify x / 1 to x, if possible. */
4234 if (trueop1 == CONST1_RTX (mode))
4235 return op0;
4236 return 0;
4238 case VEC_SERIES:
4239 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4240 return gen_vec_duplicate (mode, op0);
4241 if (valid_for_const_vector_p (mode, op0)
4242 && valid_for_const_vector_p (mode, op1))
4243 return gen_const_vec_series (mode, op0, op1);
4244 return 0;
4246 case VEC_SELECT:
4247 if (!VECTOR_MODE_P (mode))
4249 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4250 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
4251 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4252 gcc_assert (XVECLEN (trueop1, 0) == 1);
4254 /* We can't reason about selections made at runtime. */
4255 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4256 return 0;
4258 if (vec_duplicate_p (trueop0, &elt0))
4259 return elt0;
4261 if (GET_CODE (trueop0) == CONST_VECTOR)
4262 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
4263 (trueop1, 0, 0)));
4265 /* Extract a scalar element from a nested VEC_SELECT expression
4266 (with optional nested VEC_CONCAT expression). Some targets
4267 (i386) extract a scalar element from a vector using a chain of
4268 nested VEC_SELECT expressions. When the input operand is a memory
4269 operand, this operation can be simplified to a simple scalar
4270 load from an offset memory address. */
4271 int n_elts;
4272 if (GET_CODE (trueop0) == VEC_SELECT
4273 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4274 .is_constant (&n_elts)))
4276 rtx op0 = XEXP (trueop0, 0);
4277 rtx op1 = XEXP (trueop0, 1);
4279 int i = INTVAL (XVECEXP (trueop1, 0, 0));
4280 int elem;
4282 rtvec vec;
4283 rtx tmp_op, tmp;
4285 gcc_assert (GET_CODE (op1) == PARALLEL);
4286 gcc_assert (i < n_elts);
4288 /* Select the element pointed to by the nested selector. */
4289 elem = INTVAL (XVECEXP (op1, 0, i));
4291 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4292 if (GET_CODE (op0) == VEC_CONCAT)
4294 rtx op00 = XEXP (op0, 0);
4295 rtx op01 = XEXP (op0, 1);
4297 machine_mode mode00, mode01;
4298 int n_elts00, n_elts01;
4300 mode00 = GET_MODE (op00);
4301 mode01 = GET_MODE (op01);
4303 /* Find out the number of elements of each operand.
4304 Since the concatenated result has a constant number
4305 of elements, the operands must too. */
4306 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4307 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4309 gcc_assert (n_elts == n_elts00 + n_elts01);
4311 /* Select correct operand of VEC_CONCAT
4312 and adjust selector. */
4313 if (elem < n_elts01)
4314 tmp_op = op00;
4315 else
4317 tmp_op = op01;
4318 elem -= n_elts00;
4321 else
4322 tmp_op = op0;
4324 vec = rtvec_alloc (1);
4325 RTVEC_ELT (vec, 0) = GEN_INT (elem);
4327 tmp = gen_rtx_fmt_ee (code, mode,
4328 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4329 return tmp;
4332 else
4334 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4335 gcc_assert (GET_MODE_INNER (mode)
4336 == GET_MODE_INNER (GET_MODE (trueop0)));
4337 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4339 if (vec_duplicate_p (trueop0, &elt0))
4340 /* It doesn't matter which elements are selected by trueop1,
4341 because they are all the same. */
4342 return gen_vec_duplicate (mode, elt0);
4344 if (GET_CODE (trueop0) == CONST_VECTOR)
4346 unsigned n_elts = XVECLEN (trueop1, 0);
4347 rtvec v = rtvec_alloc (n_elts);
4348 unsigned int i;
4350 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4351 for (i = 0; i < n_elts; i++)
4353 rtx x = XVECEXP (trueop1, 0, i);
4355 if (!CONST_INT_P (x))
4356 return 0;
4358 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4359 INTVAL (x));
4362 return gen_rtx_CONST_VECTOR (mode, v);
4365 /* Recognize the identity. */
4366 if (GET_MODE (trueop0) == mode)
4368 bool maybe_ident = true;
4369 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4371 rtx j = XVECEXP (trueop1, 0, i);
4372 if (!CONST_INT_P (j) || INTVAL (j) != i)
4374 maybe_ident = false;
4375 break;
4378 if (maybe_ident)
4379 return trueop0;
4382 /* If we select a low-part subreg, return that. */
4383 if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
4385 rtx new_rtx = lowpart_subreg (mode, trueop0,
4386 GET_MODE (trueop0));
4387 if (new_rtx != NULL_RTX)
4388 return new_rtx;
4391 /* If we build {a,b} then permute it, build the result directly. */
4392 if (XVECLEN (trueop1, 0) == 2
4393 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4394 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4395 && GET_CODE (trueop0) == VEC_CONCAT
4396 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4397 && GET_MODE (XEXP (trueop0, 0)) == mode
4398 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4399 && GET_MODE (XEXP (trueop0, 1)) == mode)
4401 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4402 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4403 rtx subop0, subop1;
4405 gcc_assert (i0 < 4 && i1 < 4);
4406 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4407 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4409 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4412 if (XVECLEN (trueop1, 0) == 2
4413 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4414 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4415 && GET_CODE (trueop0) == VEC_CONCAT
4416 && GET_MODE (trueop0) == mode)
4418 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4419 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4420 rtx subop0, subop1;
4422 gcc_assert (i0 < 2 && i1 < 2);
4423 subop0 = XEXP (trueop0, i0);
4424 subop1 = XEXP (trueop0, i1);
4426 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4429 /* If we select one half of a vec_concat, return that. */
4430 int l0, l1;
4431 if (GET_CODE (trueop0) == VEC_CONCAT
4432 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4433 .is_constant (&l0))
4434 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4435 .is_constant (&l1))
4436 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4438 rtx subop0 = XEXP (trueop0, 0);
4439 rtx subop1 = XEXP (trueop0, 1);
4440 machine_mode mode0 = GET_MODE (subop0);
4441 machine_mode mode1 = GET_MODE (subop1);
4442 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4443 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4445 bool success = true;
4446 for (int i = 1; i < l0; ++i)
4448 rtx j = XVECEXP (trueop1, 0, i);
4449 if (!CONST_INT_P (j) || INTVAL (j) != i)
4451 success = false;
4452 break;
4455 if (success)
4456 return subop0;
4458 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4460 bool success = true;
4461 for (int i = 1; i < l1; ++i)
4463 rtx j = XVECEXP (trueop1, 0, i);
4464 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4466 success = false;
4467 break;
4470 if (success)
4471 return subop1;
4475 /* Simplify vec_select of a subreg of X to just a vec_select of X
4476 when X has the same component mode as the vec_select. */
4477 unsigned HOST_WIDE_INT subreg_offset = 0;
4478 if (GET_CODE (trueop0) == SUBREG
4479 && GET_MODE_INNER (mode)
4480 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
4481 && GET_MODE_NUNITS (mode).is_constant (&l1)
4482 && constant_multiple_p (subreg_memory_offset (trueop0),
4483 GET_MODE_UNIT_BITSIZE (mode),
4484 &subreg_offset))
4486 poly_uint64 nunits
4487 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
4488 bool success = true;
4489 for (int i = 0; i != l1; i++)
4491 rtx idx = XVECEXP (trueop1, 0, i);
4492 if (!CONST_INT_P (idx)
4493 || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
4495 success = false;
4496 break;
4500 if (success)
4502 rtx par = trueop1;
4503 if (subreg_offset)
4505 rtvec vec = rtvec_alloc (l1);
4506 for (int i = 0; i < l1; i++)
4507 RTVEC_ELT (vec, i)
4508 = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
4509 + subreg_offset);
4510 par = gen_rtx_PARALLEL (VOIDmode, vec);
4512 return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
4517 if (XVECLEN (trueop1, 0) == 1
4518 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4519 && GET_CODE (trueop0) == VEC_CONCAT)
4521 rtx vec = trueop0;
4522 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4524 /* Try to find the element in the VEC_CONCAT. */
4525 while (GET_MODE (vec) != mode
4526 && GET_CODE (vec) == VEC_CONCAT)
4528 poly_int64 vec_size;
4530 if (CONST_INT_P (XEXP (vec, 0)))
4532 /* vec_concat of two const_ints doesn't make sense with
4533 respect to modes. */
4534 if (CONST_INT_P (XEXP (vec, 1)))
4535 return 0;
4537 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4538 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4540 else
4541 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4543 if (known_lt (offset, vec_size))
4544 vec = XEXP (vec, 0);
4545 else if (known_ge (offset, vec_size))
4547 offset -= vec_size;
4548 vec = XEXP (vec, 1);
4550 else
4551 break;
4552 vec = avoid_constant_pool_reference (vec);
4555 if (GET_MODE (vec) == mode)
4556 return vec;
4559 /* If we select elements in a vec_merge that all come from the same
4560 operand, select from that operand directly. */
4561 if (GET_CODE (op0) == VEC_MERGE)
4563 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4564 if (CONST_INT_P (trueop02))
4566 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4567 bool all_operand0 = true;
4568 bool all_operand1 = true;
4569 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4571 rtx j = XVECEXP (trueop1, 0, i);
4572 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4573 all_operand1 = false;
4574 else
4575 all_operand0 = false;
4577 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4578 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4579 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4580 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4584 /* If we have two nested selects that are inverses of each
4585 other, replace them with the source operand. */
4586 if (GET_CODE (trueop0) == VEC_SELECT
4587 && GET_MODE (XEXP (trueop0, 0)) == mode)
4589 rtx op0_subop1 = XEXP (trueop0, 1);
4590 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4591 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4593 /* Apply the outer ordering vector to the inner one. (The inner
4594 ordering vector is expressly permitted to be of a different
4595 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4596 then the two VEC_SELECTs cancel. */
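/* For instance, an inner selector { 1, 0 } composed with an outer
selector { 1, 0 } yields { 0, 1 }, so selecting twice with the same
two-element swap returns the original vector. */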
4597 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4599 rtx x = XVECEXP (trueop1, 0, i);
4600 if (!CONST_INT_P (x))
4601 return 0;
4602 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4603 if (!CONST_INT_P (y) || i != INTVAL (y))
4604 return 0;
4606 return XEXP (trueop0, 0);
4609 return 0;
4610 case VEC_CONCAT:
4612 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4613 ? GET_MODE (trueop0)
4614 : GET_MODE_INNER (mode));
4615 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4616 ? GET_MODE (trueop1)
4617 : GET_MODE_INNER (mode));
4619 gcc_assert (VECTOR_MODE_P (mode));
4620 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4621 + GET_MODE_SIZE (op1_mode),
4622 GET_MODE_SIZE (mode)));
4624 if (VECTOR_MODE_P (op0_mode))
4625 gcc_assert (GET_MODE_INNER (mode)
4626 == GET_MODE_INNER (op0_mode));
4627 else
4628 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4630 if (VECTOR_MODE_P (op1_mode))
4631 gcc_assert (GET_MODE_INNER (mode)
4632 == GET_MODE_INNER (op1_mode));
4633 else
4634 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4636 unsigned int n_elts, in_n_elts;
4637 if ((GET_CODE (trueop0) == CONST_VECTOR
4638 || CONST_SCALAR_INT_P (trueop0)
4639 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4640 && (GET_CODE (trueop1) == CONST_VECTOR
4641 || CONST_SCALAR_INT_P (trueop1)
4642 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
4643 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4644 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4646 rtvec v = rtvec_alloc (n_elts);
4647 unsigned int i;
4648 for (i = 0; i < n_elts; i++)
4650 if (i < in_n_elts)
4652 if (!VECTOR_MODE_P (op0_mode))
4653 RTVEC_ELT (v, i) = trueop0;
4654 else
4655 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4657 else
4659 if (!VECTOR_MODE_P (op1_mode))
4660 RTVEC_ELT (v, i) = trueop1;
4661 else
4662 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4663 i - in_n_elts);
4667 return gen_rtx_CONST_VECTOR (mode, v);
4670 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4671 Restrict the transformation to avoid generating a VEC_SELECT with a
4672 mode unrelated to its operand. */
4673 if (GET_CODE (trueop0) == VEC_SELECT
4674 && GET_CODE (trueop1) == VEC_SELECT
4675 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4676 && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
4677 == GET_MODE_INNER(mode))
4679 rtx par0 = XEXP (trueop0, 1);
4680 rtx par1 = XEXP (trueop1, 1);
4681 int len0 = XVECLEN (par0, 0);
4682 int len1 = XVECLEN (par1, 0);
4683 rtvec vec = rtvec_alloc (len0 + len1);
4684 for (int i = 0; i < len0; i++)
4685 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4686 for (int i = 0; i < len1; i++)
4687 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4688 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4689 gen_rtx_PARALLEL (VOIDmode, vec));
4692 return 0;
4694 default:
4695 gcc_unreachable ();
4698 if (mode == GET_MODE (op0)
4699 && mode == GET_MODE (op1)
4700 && vec_duplicate_p (op0, &elt0)
4701 && vec_duplicate_p (op1, &elt1))
4703 /* Try applying the operator to ELT and see if that simplifies.
4704 We can duplicate the result if so.
4706 The reason we don't use simplify_gen_binary is that it isn't
4707 necessarily a win to convert things like:
4709 (plus:V (vec_duplicate:V (reg:S R1))
4710 (vec_duplicate:V (reg:S R2)))
4714 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4716 The first might be done entirely in vector registers while the
4717 second might need a move between register files. */
4718 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4719 elt0, elt1);
4720 if (tem)
4721 return gen_vec_duplicate (mode, tem);
4724 return 0;
4727 /* Return true if binary operation OP distributes over addition in operand
4728 OPNO, with the other operand being held constant. OPNO counts from 1. */
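/* For example, MULT distributes in either operand, since
(a + b) * c == a * c + b * c, whereas ASHIFT distributes only in
operand 1: (a + b) << c == (a << c) + (b << c), but c << (a + b)
is not (c << a) + (c << b). */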
4730 static bool
4731 distributes_over_addition_p (rtx_code op, int opno)
4733 switch (op)
4735 case PLUS:
4736 case MINUS:
4737 case MULT:
4738 return true;
4740 case ASHIFT:
4741 return opno == 1;
4743 default:
4744 return false;
4749 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4750 rtx op0, rtx op1)
4752 if (VECTOR_MODE_P (mode)
4753 && code != VEC_CONCAT
4754 && GET_CODE (op0) == CONST_VECTOR
4755 && GET_CODE (op1) == CONST_VECTOR)
4757 bool step_ok_p;
4758 if (CONST_VECTOR_STEPPED_P (op0)
4759 && CONST_VECTOR_STEPPED_P (op1))
4760 /* We can operate directly on the encoding if:
4762 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4763 implies
4764 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4766 Addition and subtraction are the supported operators
4767 for which this is true. */
4768 step_ok_p = (code == PLUS || code == MINUS);
4769 else if (CONST_VECTOR_STEPPED_P (op0))
4770 /* We can operate directly on stepped encodings if:
4772 a3 - a2 == a2 - a1
4773 implies:
4774 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4776 which is true if (x -> x op c) distributes over addition. */
4777 step_ok_p = distributes_over_addition_p (code, 1);
4778 else
4779 /* Similarly in reverse. */
4780 step_ok_p = distributes_over_addition_p (code, 2);
4781 rtx_vector_builder builder;
4782 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4783 return 0;
4785 unsigned int count = builder.encoded_nelts ();
4786 for (unsigned int i = 0; i < count; i++)
4788 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4789 CONST_VECTOR_ELT (op0, i),
4790 CONST_VECTOR_ELT (op1, i));
4791 if (!x || !valid_for_const_vector_p (mode, x))
4792 return 0;
4793 builder.quick_push (x);
4795 return builder.build ();
4798 if (VECTOR_MODE_P (mode)
4799 && code == VEC_CONCAT
4800 && (CONST_SCALAR_INT_P (op0)
4801 || CONST_FIXED_P (op0)
4802 || CONST_DOUBLE_AS_FLOAT_P (op0))
4803 && (CONST_SCALAR_INT_P (op1)
4804 || CONST_DOUBLE_AS_FLOAT_P (op1)
4805 || CONST_FIXED_P (op1)))
4807 /* Both inputs have a constant number of elements, so the result
4808 must too. */
4809 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4810 rtvec v = rtvec_alloc (n_elts);
4812 gcc_assert (n_elts >= 2);
4813 if (n_elts == 2)
4815 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4816 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4818 RTVEC_ELT (v, 0) = op0;
4819 RTVEC_ELT (v, 1) = op1;
4821 else
4823 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4824 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4825 unsigned i;
4827 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4828 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4829 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4831 for (i = 0; i < op0_n_elts; ++i)
4832 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4833 for (i = 0; i < op1_n_elts; ++i)
4834 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4837 return gen_rtx_CONST_VECTOR (mode, v);
4840 if (SCALAR_FLOAT_MODE_P (mode)
4841 && CONST_DOUBLE_AS_FLOAT_P (op0)
4842 && CONST_DOUBLE_AS_FLOAT_P (op1)
4843 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4845 if (code == AND
4846 || code == IOR
4847 || code == XOR)
4849 long tmp0[4];
4850 long tmp1[4];
4851 REAL_VALUE_TYPE r;
4852 int i;
4854 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4855 GET_MODE (op0));
4856 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4857 GET_MODE (op1));
4858 for (i = 0; i < 4; i++)
4860 switch (code)
4862 case AND:
4863 tmp0[i] &= tmp1[i];
4864 break;
4865 case IOR:
4866 tmp0[i] |= tmp1[i];
4867 break;
4868 case XOR:
4869 tmp0[i] ^= tmp1[i];
4870 break;
4871 default:
4872 gcc_unreachable ();
4875 real_from_target (&r, tmp0, mode);
4876 return const_double_from_real_value (r, mode);
4878 else
4880 REAL_VALUE_TYPE f0, f1, value, result;
4881 const REAL_VALUE_TYPE *opr0, *opr1;
4882 bool inexact;
4884 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4885 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4887 if (HONOR_SNANS (mode)
4888 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4889 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4890 return 0;
4892 real_convert (&f0, mode, opr0);
4893 real_convert (&f1, mode, opr1);
4895 if (code == DIV
4896 && real_equal (&f1, &dconst0)
4897 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4898 return 0;
4900 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4901 && flag_trapping_math
4902 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4904 int s0 = REAL_VALUE_NEGATIVE (f0);
4905 int s1 = REAL_VALUE_NEGATIVE (f1);
4907 switch (code)
4909 case PLUS:
4910 /* Inf + -Inf = NaN plus exception. */
4911 if (s0 != s1)
4912 return 0;
4913 break;
4914 case MINUS:
4915 /* Inf - Inf = NaN plus exception. */
4916 if (s0 == s1)
4917 return 0;
4918 break;
4919 case DIV:
4920 /* Inf / Inf = NaN plus exception. */
4921 return 0;
4922 default:
4923 break;
4927 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4928 && flag_trapping_math
4929 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4930 || (REAL_VALUE_ISINF (f1)
4931 && real_equal (&f0, &dconst0))))
4932 /* Inf * 0 = NaN plus exception. */
4933 return 0;
4935 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4936 &f0, &f1);
4937 real_convert (&result, mode, &value);
4939 /* Don't constant fold this floating point operation if
4940 the result has overflowed and flag_trapping_math. */
4942 if (flag_trapping_math
4943 && MODE_HAS_INFINITIES (mode)
4944 && REAL_VALUE_ISINF (result)
4945 && !REAL_VALUE_ISINF (f0)
4946 && !REAL_VALUE_ISINF (f1))
4947 /* Overflow plus exception. */
4948 return 0;
4950 /* Don't constant fold this floating point operation if the
4951 result may depend upon the run-time rounding mode and
4952 flag_rounding_math is set, or if GCC's software emulation
4953 is unable to accurately represent the result. */
4955 if ((flag_rounding_math
4956 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4957 && (inexact || !real_identical (&result, &value)))
4958 return NULL_RTX;
4960 return const_double_from_real_value (result, mode);
4964 /* We can fold some multi-word operations. */
4965 scalar_int_mode int_mode;
4966 if (is_a <scalar_int_mode> (mode, &int_mode)
4967 && CONST_SCALAR_INT_P (op0)
4968 && CONST_SCALAR_INT_P (op1)
4969 && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4971 wide_int result;
4972 wi::overflow_type overflow;
4973 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4974 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4976 #if TARGET_SUPPORTS_WIDE_INT == 0
4977 /* This assert keeps the simplification from producing a result
4978 that cannot be represented in a CONST_DOUBLE, but a lot of
4979 upstream callers expect that this function never fails to
4980 simplify something, and so if you added this to the test
4981 above, the code would die later anyway. If this assert
4982 happens, you just need to make the port support wide int. */
4983 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4984 #endif
4985 switch (code)
4987 case MINUS:
4988 result = wi::sub (pop0, pop1);
4989 break;
4991 case PLUS:
4992 result = wi::add (pop0, pop1);
4993 break;
4995 case MULT:
4996 result = wi::mul (pop0, pop1);
4997 break;
4999 case DIV:
5000 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
5001 if (overflow)
5002 return NULL_RTX;
5003 break;
5005 case MOD:
5006 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
5007 if (overflow)
5008 return NULL_RTX;
5009 break;
5011 case UDIV:
5012 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
5013 if (overflow)
5014 return NULL_RTX;
5015 break;
5017 case UMOD:
5018 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
5019 if (overflow)
5020 return NULL_RTX;
5021 break;
5023 case AND:
5024 result = wi::bit_and (pop0, pop1);
5025 break;
5027 case IOR:
5028 result = wi::bit_or (pop0, pop1);
5029 break;
5031 case XOR:
5032 result = wi::bit_xor (pop0, pop1);
5033 break;
5035 case SMIN:
5036 result = wi::smin (pop0, pop1);
5037 break;
5039 case SMAX:
5040 result = wi::smax (pop0, pop1);
5041 break;
5043 case UMIN:
5044 result = wi::umin (pop0, pop1);
5045 break;
5047 case UMAX:
5048 result = wi::umax (pop0, pop1);
5049 break;
5051 case LSHIFTRT:
5052 case ASHIFTRT:
5053 case ASHIFT:
5054 case SS_ASHIFT:
5055 case US_ASHIFT:
5057 wide_int wop1 = pop1;
5058 if (SHIFT_COUNT_TRUNCATED)
5059 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
5060 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
5061 return NULL_RTX;
5063 switch (code)
5065 case LSHIFTRT:
5066 result = wi::lrshift (pop0, wop1);
5067 break;
5069 case ASHIFTRT:
5070 result = wi::arshift (pop0, wop1);
5071 break;
5073 case ASHIFT:
5074 result = wi::lshift (pop0, wop1);
5075 break;
5077 case SS_ASHIFT:
5078 if (wi::leu_p (wop1, wi::clrsb (pop0)))
5079 result = wi::lshift (pop0, wop1);
5080 else if (wi::neg_p (pop0))
5081 result = wi::min_value (int_mode, SIGNED);
5082 else
5083 result = wi::max_value (int_mode, SIGNED);
5084 break;
5086 case US_ASHIFT:
5087 if (wi::eq_p (pop0, 0))
5088 result = pop0;
5089 else if (wi::leu_p (wop1, wi::clz (pop0)))
5090 result = wi::lshift (pop0, wop1);
5091 else
5092 result = wi::max_value (int_mode, UNSIGNED);
5093 break;
5095 default:
5096 gcc_unreachable ();
5098 break;
5100 case ROTATE:
5101 case ROTATERT:
5103 if (wi::neg_p (pop1))
5104 return NULL_RTX;
5106 switch (code)
5108 case ROTATE:
5109 result = wi::lrotate (pop0, pop1);
5110 break;
5112 case ROTATERT:
5113 result = wi::rrotate (pop0, pop1);
5114 break;
5116 default:
5117 gcc_unreachable ();
5119 break;
5122 case SS_PLUS:
5123 result = wi::add (pop0, pop1, SIGNED, &overflow);
5124 clamp_signed_saturation:
5125 if (overflow == wi::OVF_OVERFLOW)
5126 result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
5127 else if (overflow == wi::OVF_UNDERFLOW)
5128 result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
5129 else if (overflow != wi::OVF_NONE)
5130 return NULL_RTX;
5131 break;
5133 case US_PLUS:
5134 result = wi::add (pop0, pop1, UNSIGNED, &overflow);
5135 clamp_unsigned_saturation:
5136 if (overflow != wi::OVF_NONE)
5137 result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5138 break;
5140 case SS_MINUS:
5141 result = wi::sub (pop0, pop1, SIGNED, &overflow);
5142 goto clamp_signed_saturation;
5144 case US_MINUS:
5145 result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
5146 if (overflow != wi::OVF_NONE)
5147 result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5148 break;
5150 case SS_MULT:
5151 result = wi::mul (pop0, pop1, SIGNED, &overflow);
5152 goto clamp_signed_saturation;
5154 case US_MULT:
5155 result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
5156 goto clamp_unsigned_saturation;
5158 case SMUL_HIGHPART:
5159 result = wi::mul_high (pop0, pop1, SIGNED);
5160 break;
5162 case UMUL_HIGHPART:
5163 result = wi::mul_high (pop0, pop1, UNSIGNED);
5164 break;
5166 default:
5167 return NULL_RTX;
5169 return immed_wide_int_const (result, int_mode);
5172 /* Handle polynomial integers. */
5173 if (NUM_POLY_INT_COEFFS > 1
5174 && is_a <scalar_int_mode> (mode, &int_mode)
5175 && poly_int_rtx_p (op0)
5176 && poly_int_rtx_p (op1))
5178 poly_wide_int result;
5179 switch (code)
5181 case PLUS:
5182 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
5183 break;
5185 case MINUS:
5186 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
5187 break;
5189 case MULT:
5190 if (CONST_SCALAR_INT_P (op1))
5191 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
5192 else
5193 return NULL_RTX;
5194 break;
5196 case ASHIFT:
5197 if (CONST_SCALAR_INT_P (op1))
5199 wide_int shift = rtx_mode_t (op1, mode);
5200 if (SHIFT_COUNT_TRUNCATED)
5201 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
5202 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
5203 return NULL_RTX;
5204 result = wi::to_poly_wide (op0, mode) << shift;
5206 else
5207 return NULL_RTX;
5208 break;
5210 case IOR:
5211 if (!CONST_SCALAR_INT_P (op1)
5212 || !can_ior_p (wi::to_poly_wide (op0, mode),
5213 rtx_mode_t (op1, mode), &result))
5214 return NULL_RTX;
5215 break;
5217 default:
5218 return NULL_RTX;
5220 return immed_wide_int_const (result, int_mode);
5223 return NULL_RTX;
5228 /* Return a positive integer if X should sort after Y. The value
5229 returned is 1 if and only if X and Y are both regs. */
5231 static int
5232 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
5234 int result;
5236 result = (commutative_operand_precedence (y)
5237 - commutative_operand_precedence (x));
5238 if (result)
5239 return result + result;
5241 /* Group together equal REGs to do more simplification. */
5242 if (REG_P (x) && REG_P (y))
5243 return REGNO (x) > REGNO (y);
5245 return 0;
5248 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5249 operands may be another PLUS or MINUS.
5251 Rather than test for specific cases, we do this by a brute-force method
5252 and do all possible simplifications until no more changes occur. Then
5253 we rebuild the operation.
5255 May return NULL_RTX when no changes were made. */
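/* For instance, (minus (plus A B) (plus A C)) is flattened into the
terms +A, +B, -A and -C; the +A/-A pair cancels and the result is
rebuilt as roughly (minus B C). */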
5258 simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
5259 rtx op0, rtx op1)
5261 struct simplify_plus_minus_op_data
5263 rtx op;
5264 short neg;
5265 } ops[16];
5266 rtx result, tem;
5267 int n_ops = 2;
5268 int changed, n_constants, canonicalized = 0;
5269 int i, j;
5271 memset (ops, 0, sizeof ops);
5273 /* Set up the two operands and then expand them until nothing has been
5274 changed. If we run out of room in our array, give up; this should
5275 almost never happen. */
5277 ops[0].op = op0;
5278 ops[0].neg = 0;
5279 ops[1].op = op1;
5280 ops[1].neg = (code == MINUS);
5284 changed = 0;
5285 n_constants = 0;
5287 for (i = 0; i < n_ops; i++)
5289 rtx this_op = ops[i].op;
5290 int this_neg = ops[i].neg;
5291 enum rtx_code this_code = GET_CODE (this_op);
5293 switch (this_code)
5295 case PLUS:
5296 case MINUS:
5297 if (n_ops == ARRAY_SIZE (ops))
5298 return NULL_RTX;
5300 ops[n_ops].op = XEXP (this_op, 1);
5301 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
5302 n_ops++;
5304 ops[i].op = XEXP (this_op, 0);
5305 changed = 1;
5306 /* If this operand was negated then we will potentially
5307 canonicalize the expression. Similarly, if we don't
5308 place the operands adjacent, we're re-ordering the
5309 expression and thus might be performing a
5310 canonicalization. Ignore register re-ordering.
5311 ??? It might be better to shuffle the ops array here,
5312 but then (plus (plus (A, B), plus (C, D))) wouldn't
5313 be seen as non-canonical. */
5314 if (this_neg
5315 || (i != n_ops - 2
5316 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
5317 canonicalized = 1;
5318 break;
5320 case NEG:
5321 ops[i].op = XEXP (this_op, 0);
5322 ops[i].neg = ! this_neg;
5323 changed = 1;
5324 canonicalized = 1;
5325 break;
5327 case CONST:
5328 if (n_ops != ARRAY_SIZE (ops)
5329 && GET_CODE (XEXP (this_op, 0)) == PLUS
5330 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
5331 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
5333 ops[i].op = XEXP (XEXP (this_op, 0), 0);
5334 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
5335 ops[n_ops].neg = this_neg;
5336 n_ops++;
5337 changed = 1;
5338 canonicalized = 1;
5340 break;
5342 case NOT:
5343 /* ~a -> (-a - 1) */
5344 if (n_ops != ARRAY_SIZE (ops))
5346 ops[n_ops].op = CONSTM1_RTX (mode);
5347 ops[n_ops++].neg = this_neg;
5348 ops[i].op = XEXP (this_op, 0);
5349 ops[i].neg = !this_neg;
5350 changed = 1;
5351 canonicalized = 1;
5353 break;
5355 CASE_CONST_SCALAR_INT:
5356 case CONST_POLY_INT:
5357 n_constants++;
5358 if (this_neg)
5360 ops[i].op = neg_poly_int_rtx (mode, this_op);
5361 ops[i].neg = 0;
5362 changed = 1;
5363 canonicalized = 1;
5365 break;
5367 default:
5368 break;
5372 while (changed);
5374 if (n_constants > 1)
5375 canonicalized = 1;
5377 gcc_assert (n_ops >= 2);
5379 /* If we only have two operands, we can avoid the loops. */
5380 if (n_ops == 2)
5382 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
5383 rtx lhs, rhs;
5385 /* Get the two operands. Be careful with the order, especially for
5386 the cases where code == MINUS. */
5387 if (ops[0].neg && ops[1].neg)
5389 lhs = gen_rtx_NEG (mode, ops[0].op);
5390 rhs = ops[1].op;
5392 else if (ops[0].neg)
5394 lhs = ops[1].op;
5395 rhs = ops[0].op;
5397 else
5399 lhs = ops[0].op;
5400 rhs = ops[1].op;
5403 return simplify_const_binary_operation (code, mode, lhs, rhs);
5406 /* Now simplify each pair of operands until nothing changes. */
5407 while (1)
5409 /* Insertion sort is good enough for a small array. */
5410 for (i = 1; i < n_ops; i++)
5412 struct simplify_plus_minus_op_data save;
5413 int cmp;
5415 j = i - 1;
5416 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5417 if (cmp <= 0)
5418 continue;
5419 /* Just swapping registers doesn't count as canonicalization. */
5420 if (cmp != 1)
5421 canonicalized = 1;
5423 save = ops[i];
5425 ops[j + 1] = ops[j];
5426 while (j--
5427 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5428 ops[j + 1] = save;
5431 changed = 0;
5432 for (i = n_ops - 1; i > 0; i--)
5433 for (j = i - 1; j >= 0; j--)
5435 rtx lhs = ops[j].op, rhs = ops[i].op;
5436 int lneg = ops[j].neg, rneg = ops[i].neg;
5438 if (lhs != 0 && rhs != 0)
5440 enum rtx_code ncode = PLUS;
5442 if (lneg != rneg)
5444 ncode = MINUS;
5445 if (lneg)
5446 std::swap (lhs, rhs);
5448 else if (swap_commutative_operands_p (lhs, rhs))
5449 std::swap (lhs, rhs);
5451 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5452 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5454 rtx tem_lhs, tem_rhs;
5456 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5457 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5458 tem = simplify_binary_operation (ncode, mode, tem_lhs,
5459 tem_rhs);
5461 if (tem && !CONSTANT_P (tem))
5462 tem = gen_rtx_CONST (GET_MODE (tem), tem);
5464 else
5465 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5467 if (tem)
5469 /* Reject "simplifications" that just wrap the two
5470 arguments in a CONST. Failure to do so can result
5471 in infinite recursion with simplify_binary_operation
5472 when it calls us to simplify CONST operations.
5473 Also, if we find such a simplification, don't try
5474 any more combinations with this rhs: We must have
5475 something like symbol+offset, i.e. one of the
5476 trivial CONST expressions we handle later. */
5477 if (GET_CODE (tem) == CONST
5478 && GET_CODE (XEXP (tem, 0)) == ncode
5479 && XEXP (XEXP (tem, 0), 0) == lhs
5480 && XEXP (XEXP (tem, 0), 1) == rhs)
5481 break;
5482 lneg &= rneg;
5483 if (GET_CODE (tem) == NEG)
5484 tem = XEXP (tem, 0), lneg = !lneg;
5485 if (poly_int_rtx_p (tem) && lneg)
5486 tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5488 ops[i].op = tem;
5489 ops[i].neg = lneg;
5490 ops[j].op = NULL_RTX;
5491 changed = 1;
5492 canonicalized = 1;
5497 if (!changed)
5498 break;
5500 /* Pack all the operands to the lower-numbered entries. */
5501 for (i = 0, j = 0; j < n_ops; j++)
5502 if (ops[j].op)
5504 ops[i] = ops[j];
5505 i++;
5507 n_ops = i;
5510 /* If nothing changed, check that rematerialization of rtl instructions
5511 is still required. */
5512 if (!canonicalized)
5514 /* Perform rematerialization only if all operands are registers and
5515 all operations are PLUS. */
5516 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5517 around rs6000 and how it uses the CA register. See PR67145. */
5518 for (i = 0; i < n_ops; i++)
5519 if (ops[i].neg
5520 || !REG_P (ops[i].op)
5521 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5522 && fixed_regs[REGNO (ops[i].op)]
5523 && !global_regs[REGNO (ops[i].op)]
5524 && ops[i].op != frame_pointer_rtx
5525 && ops[i].op != arg_pointer_rtx
5526 && ops[i].op != stack_pointer_rtx))
5527 return NULL_RTX;
5528 goto gen_result;
5531 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5532 if (n_ops == 2
5533 && CONST_INT_P (ops[1].op)
5534 && CONSTANT_P (ops[0].op)
5535 && ops[0].neg)
5536 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5538 /* We suppressed creation of trivial CONST expressions in the
5539 combination loop to avoid recursion. Create one manually now.
5540 The combination loop should have ensured that there is exactly
5541 one CONST_INT, and the sort will have ensured that it is last
5542 in the array and that any other constant will be next-to-last. */
5544 if (n_ops > 1
5545 && poly_int_rtx_p (ops[n_ops - 1].op)
5546 && CONSTANT_P (ops[n_ops - 2].op))
5548 rtx value = ops[n_ops - 1].op;
5549 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5550 value = neg_poly_int_rtx (mode, value);
5551 if (CONST_INT_P (value))
5553 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5554 INTVAL (value));
5555 n_ops--;
5559 /* Put a non-negated operand first, if possible. */
5561 for (i = 0; i < n_ops && ops[i].neg; i++)
5562 continue;
5563 if (i == n_ops)
5564 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5565 else if (i != 0)
5567 tem = ops[0].op;
5568 ops[0] = ops[i];
5569 ops[i].op = tem;
5570 ops[i].neg = 1;
5573 /* Now make the result by performing the requested operations. */
5574 gen_result:
5575 result = ops[0].op;
5576 for (i = 1; i < n_ops; i++)
5577 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5578 mode, result, ops[i].op);
5580 return result;
5583 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5584 static bool
5585 plus_minus_operand_p (const_rtx x)
5587 return GET_CODE (x) == PLUS
5588 || GET_CODE (x) == MINUS
5589 || (GET_CODE (x) == CONST
5590 && GET_CODE (XEXP (x, 0)) == PLUS
5591 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5592 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5595 /* Like simplify_binary_operation except used for relational operators.
5596 MODE is the mode of the result. If MODE is VOIDmode, both operands must
5597 not also be VOIDmode.
5599 CMP_MODE specifies the mode in which the comparison is done, so it is
5600 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5601 the operands or, if both are VOIDmode, the operands are compared in
5602 "infinite precision". */
5604 simplify_context::simplify_relational_operation (rtx_code code,
5605 machine_mode mode,
5606 machine_mode cmp_mode,
5607 rtx op0, rtx op1)
5609 rtx tem, trueop0, trueop1;
5611 if (cmp_mode == VOIDmode)
5612 cmp_mode = GET_MODE (op0);
5613 if (cmp_mode == VOIDmode)
5614 cmp_mode = GET_MODE (op1);
5616 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5617 if (tem)
5618 return relational_result (mode, cmp_mode, tem);
5620 /* For the following tests, ensure const0_rtx is op1. */
5621 if (swap_commutative_operands_p (op0, op1)
5622 || (op0 == const0_rtx && op1 != const0_rtx))
5623 std::swap (op0, op1), code = swap_condition (code);
5625 /* If op0 is a compare, extract the comparison arguments from it. */
5626 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5627 return simplify_gen_relational (code, mode, VOIDmode,
5628 XEXP (op0, 0), XEXP (op0, 1));
5630 if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
5631 return NULL_RTX;
5633 trueop0 = avoid_constant_pool_reference (op0);
5634 trueop1 = avoid_constant_pool_reference (op1);
5635 return simplify_relational_operation_1 (code, mode, cmp_mode,
5636 trueop0, trueop1);
5639 /* This part of simplify_relational_operation is only used when CMP_MODE
5640 is not in class MODE_CC (i.e. it is a real comparison).
5642 MODE is the mode of the result, while CMP_MODE specifies the mode
5643 in which the comparison is done, so it is the mode of the operands. */
5646 simplify_context::simplify_relational_operation_1 (rtx_code code,
5647 machine_mode mode,
5648 machine_mode cmp_mode,
5649 rtx op0, rtx op1)
5651 enum rtx_code op0code = GET_CODE (op0);
5653 if (op1 == const0_rtx && COMPARISON_P (op0))
5655 /* If op0 is a comparison, extract the comparison arguments
5656 from it. */
5657 if (code == NE)
5659 if (GET_MODE (op0) == mode)
5660 return simplify_rtx (op0);
5661 else
5662 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5663 XEXP (op0, 0), XEXP (op0, 1));
5665 else if (code == EQ)
5667 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5668 if (new_code != UNKNOWN)
5669 return simplify_gen_relational (new_code, mode, VOIDmode,
5670 XEXP (op0, 0), XEXP (op0, 1));
5674 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5675 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5676 if ((code == LTU || code == GEU)
5677 && GET_CODE (op0) == PLUS
5678 && CONST_INT_P (XEXP (op0, 1))
5679 && (rtx_equal_p (op1, XEXP (op0, 0))
5680 || rtx_equal_p (op1, XEXP (op0, 1)))
5681 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5682 && XEXP (op0, 1) != const0_rtx)
5684 rtx new_cmp
5685 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5686 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5687 cmp_mode, XEXP (op0, 0), new_cmp);
5690 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5691 transformed into (LTU a -C). */
5692 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5693 && CONST_INT_P (XEXP (op0, 1))
5694 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5695 && XEXP (op0, 1) != const0_rtx)
5697 rtx new_cmp
5698 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5699 return simplify_gen_relational (LTU, mode, cmp_mode,
5700 XEXP (op0, 0), new_cmp);
5703 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5704 if ((code == LTU || code == GEU)
5705 && GET_CODE (op0) == PLUS
5706 && rtx_equal_p (op1, XEXP (op0, 1))
5707 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5708 && !rtx_equal_p (op1, XEXP (op0, 0)))
5709 return simplify_gen_relational (code, mode, cmp_mode, op0,
5710 copy_rtx (XEXP (op0, 0)));
5712 if (op1 == const0_rtx)
5714 /* Canonicalize (GTU x 0) as (NE x 0). */
5715 if (code == GTU)
5716 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5717 /* Canonicalize (LEU x 0) as (EQ x 0). */
5718 if (code == LEU)
5719 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5721 else if (op1 == const1_rtx)
5723 switch (code)
5725 case GE:
5726 /* Canonicalize (GE x 1) as (GT x 0). */
5727 return simplify_gen_relational (GT, mode, cmp_mode,
5728 op0, const0_rtx);
5729 case GEU:
5730 /* Canonicalize (GEU x 1) as (NE x 0). */
5731 return simplify_gen_relational (NE, mode, cmp_mode,
5732 op0, const0_rtx);
5733 case LT:
5734 /* Canonicalize (LT x 1) as (LE x 0). */
5735 return simplify_gen_relational (LE, mode, cmp_mode,
5736 op0, const0_rtx);
5737 case LTU:
5738 /* Canonicalize (LTU x 1) as (EQ x 0). */
5739 return simplify_gen_relational (EQ, mode, cmp_mode,
5740 op0, const0_rtx);
5741 default:
5742 break;
5745 else if (op1 == constm1_rtx)
5747 /* Canonicalize (LE x -1) as (LT x 0). */
5748 if (code == LE)
5749 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5750 /* Canonicalize (GT x -1) as (GE x 0). */
5751 if (code == GT)
5752 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5755 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5756 if ((code == EQ || code == NE)
5757 && (op0code == PLUS || op0code == MINUS)
5758 && CONSTANT_P (op1)
5759 && CONSTANT_P (XEXP (op0, 1))
5760 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5762 rtx x = XEXP (op0, 0);
5763 rtx c = XEXP (op0, 1);
5764 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5765 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5767 /* Detect infinite recursion, in which this simplification would oscillate
5768 between:
5769 A + B == C <---> C - B == A,
5770 where A, B, and C are all non-simplifiable constant expressions,
5771 usually SYMBOL_REFs. */
5772 if (GET_CODE (tem) == invcode
5773 && CONSTANT_P (x)
5774 && rtx_equal_p (c, XEXP (tem, 1)))
5775 return NULL_RTX;
5777 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5780 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5781 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5782 scalar_int_mode int_mode, int_cmp_mode;
5783 if (code == NE
5784 && op1 == const0_rtx
5785 && is_int_mode (mode, &int_mode)
5786 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5787 /* ??? Work-around BImode bugs in the ia64 backend. */
5788 && int_mode != BImode
5789 && int_cmp_mode != BImode
5790 && nonzero_bits (op0, int_cmp_mode) == 1
5791 && STORE_FLAG_VALUE == 1)
5792 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5793 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5794 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5796 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5797 if ((code == EQ || code == NE)
5798 && op1 == const0_rtx
5799 && op0code == XOR)
5800 return simplify_gen_relational (code, mode, cmp_mode,
5801 XEXP (op0, 0), XEXP (op0, 1));
5803 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5804 if ((code == EQ || code == NE)
5805 && op0code == XOR
5806 && rtx_equal_p (XEXP (op0, 0), op1)
5807 && !side_effects_p (XEXP (op0, 0)))
5808 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5809 CONST0_RTX (mode));
5811 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5812 if ((code == EQ || code == NE)
5813 && op0code == XOR
5814 && rtx_equal_p (XEXP (op0, 1), op1)
5815 && !side_effects_p (XEXP (op0, 1)))
5816 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5817 CONST0_RTX (mode));
5819 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5820 if ((code == EQ || code == NE)
5821 && op0code == XOR
5822 && CONST_SCALAR_INT_P (op1)
5823 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5824 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5825 simplify_gen_binary (XOR, cmp_mode,
5826 XEXP (op0, 1), op1));
5828 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction, or
5829 by constant folding if x/y is a constant. */
5830 if ((code == EQ || code == NE)
5831 && (op0code == AND || op0code == IOR)
5832 && !side_effects_p (op1)
5833 && op1 != CONST0_RTX (cmp_mode))
5835 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5836 (eq/ne (and (not y) x) 0). */
5837 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5838 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5840 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5841 cmp_mode);
5842 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5844 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5845 CONST0_RTX (cmp_mode));
5848 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5849 (eq/ne (and (not x) y) 0). */
5850 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5851 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5853 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5854 cmp_mode);
5855 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5857 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5858 CONST0_RTX (cmp_mode));
5862 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5863 if ((code == EQ || code == NE)
5864 && GET_CODE (op0) == BSWAP
5865 && CONST_SCALAR_INT_P (op1))
5866 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5867 simplify_gen_unary (BSWAP, cmp_mode,
5868 op1, cmp_mode));
5870 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5871 if ((code == EQ || code == NE)
5872 && GET_CODE (op0) == BSWAP
5873 && GET_CODE (op1) == BSWAP)
5874 return simplify_gen_relational (code, mode, cmp_mode,
5875 XEXP (op0, 0), XEXP (op1, 0));
5877 if (op0code == POPCOUNT && op1 == const0_rtx)
5878 switch (code)
5880 case EQ:
5881 case LE:
5882 case LEU:
5883 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5884 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5885 XEXP (op0, 0), const0_rtx);
5887 case NE:
5888 case GT:
5889 case GTU:
5890 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5891 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5892 XEXP (op0, 0), const0_rtx);
5894 default:
5895 break;
5898 return NULL_RTX;
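/* An illustrative, non-compiled sketch: a few of the canonicalizations
   above, as seen through simplify_gen_relational.  SImode and the
   pseudo-register numbers are arbitrary examples.  */
#if 0
static void
example_canonicalize_relational (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx y = gen_rtx_REG (SImode, 101);

  /* (gtu:SI x (const_int 0)) canonicalizes to (ne:SI x (const_int 0)).  */
  rtx r1 = simplify_gen_relational (GTU, SImode, SImode, x, const0_rtx);

  /* (geu:SI x (const_int 1)) canonicalizes to (ne:SI x (const_int 0)).  */
  rtx r2 = simplify_gen_relational (GEU, SImode, SImode, x, const1_rtx);

  /* (eq:SI (xor:SI x y) (const_int 0)) becomes (eq:SI x y).  */
  rtx r3 = simplify_gen_relational (EQ, SImode, SImode,
				    simplify_gen_binary (XOR, SImode, x, y),
				    const0_rtx);
}
#endif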
5901 enum
5903 CMP_EQ = 1,
5904 CMP_LT = 2,
5905 CMP_GT = 4,
5906 CMP_LTU = 8,
5907 CMP_GTU = 16
5911 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5912 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5913 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5914 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5915 For floating-point comparisons, assume that the operands were ordered. */
5917 static rtx
5918 comparison_result (enum rtx_code code, int known_results)
5920 switch (code)
5922 case EQ:
5923 case UNEQ:
5924 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5925 case NE:
5926 case LTGT:
5927 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5929 case LT:
5930 case UNLT:
5931 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5932 case GE:
5933 case UNGE:
5934 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5936 case GT:
5937 case UNGT:
5938 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5939 case LE:
5940 case UNLE:
5941 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5943 case LTU:
5944 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5945 case GEU:
5946 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5948 case GTU:
5949 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5950 case LEU:
5951 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5953 case ORDERED:
5954 return const_true_rtx;
5955 case UNORDERED:
5956 return const0_rtx;
5957 default:
5958 gcc_unreachable ();
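/* An illustrative, non-compiled sketch: how a known signed ordering and a
   known unsigned ordering combine into the KNOWN_RESULTS bitmask.  3
   compared with -1 is greater as a signed value but smaller as an
   unsigned value.  */
#if 0
static void
example_known_results (void)
{
  int known = CMP_GT | CMP_LTU;
  rtx s = comparison_result (GT, known);	/* const_true_rtx  */
  rtx u = comparison_result (GEU, known);	/* const0_rtx  */
}
#endif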
5962 /* Check if the given comparison (done in the given MODE) is actually
5963 a tautology or a contradiction. If the mode is VOIDmode, the
5964 comparison is done in "infinite precision". If no simplification
5965 is possible, this function returns zero. Otherwise, it returns
5966 either const_true_rtx or const0_rtx. */
5968 rtx
5969 simplify_const_relational_operation (enum rtx_code code,
5970 machine_mode mode,
5971 rtx op0, rtx op1)
5973 rtx tem;
5974 rtx trueop0;
5975 rtx trueop1;
5977 gcc_assert (mode != VOIDmode
5978 || (GET_MODE (op0) == VOIDmode
5979 && GET_MODE (op1) == VOIDmode));
5981 /* If op0 is a compare, extract the comparison arguments from it. */
5982 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5984 op1 = XEXP (op0, 1);
5985 op0 = XEXP (op0, 0);
5987 if (GET_MODE (op0) != VOIDmode)
5988 mode = GET_MODE (op0);
5989 else if (GET_MODE (op1) != VOIDmode)
5990 mode = GET_MODE (op1);
5991 else
5992 return 0;
5995 /* We can't simplify MODE_CC values since we don't know what the
5996 actual comparison is. */
5997 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5998 return 0;
6000 /* Make sure the constant is second. */
6001 if (swap_commutative_operands_p (op0, op1))
6003 std::swap (op0, op1);
6004 code = swap_condition (code);
6007 trueop0 = avoid_constant_pool_reference (op0);
6008 trueop1 = avoid_constant_pool_reference (op1);
6010 /* For integer comparisons of A and B maybe we can simplify A - B and can
6011 then simplify a comparison of that with zero. If A and B are both either
6012 a register or a CONST_INT, this can't help; testing for these cases will
6013 prevent infinite recursion here and speed things up.
6015 We can only do this for EQ and NE comparisons; otherwise we may
6016 lose or introduce overflow, which we cannot disregard as undefined, since
6017 we do not know the signedness of the operation on either the left or
6018 the right hand side of the comparison. */
6020 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
6021 && (code == EQ || code == NE)
6022 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
6023 && (REG_P (op1) || CONST_INT_P (trueop1)))
6024 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
6025 /* We cannot do this if tem is a nonzero address. */
6026 && ! nonzero_address_p (tem))
6027 return simplify_const_relational_operation (signed_condition (code),
6028 mode, tem, const0_rtx);
6030 if (! HONOR_NANS (mode) && code == ORDERED)
6031 return const_true_rtx;
6033 if (! HONOR_NANS (mode) && code == UNORDERED)
6034 return const0_rtx;
6036 /* For modes without NaNs, if the two operands are equal, we know the
6037 result except if they have side-effects. Even with NaNs we know
6038 the result of unordered comparisons and, if signaling NaNs are
6039 irrelevant, also the result of LT/GT/LTGT. */
6040 if ((! HONOR_NANS (trueop0)
6041 || code == UNEQ || code == UNLE || code == UNGE
6042 || ((code == LT || code == GT || code == LTGT)
6043 && ! HONOR_SNANS (trueop0)))
6044 && rtx_equal_p (trueop0, trueop1)
6045 && ! side_effects_p (trueop0))
6046 return comparison_result (code, CMP_EQ);
6048 /* If the operands are floating-point constants, see if we can fold
6049 the result. */
6050 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
6051 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
6052 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
6054 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
6055 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
6057 /* Comparisons are unordered iff at least one of the values is NaN. */
6058 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
6059 switch (code)
6061 case UNEQ:
6062 case UNLT:
6063 case UNGT:
6064 case UNLE:
6065 case UNGE:
6066 case NE:
6067 case UNORDERED:
6068 return const_true_rtx;
6069 case EQ:
6070 case LT:
6071 case GT:
6072 case LE:
6073 case GE:
6074 case LTGT:
6075 case ORDERED:
6076 return const0_rtx;
6077 default:
6078 return 0;
6081 return comparison_result (code,
6082 (real_equal (d0, d1) ? CMP_EQ :
6083 real_less (d0, d1) ? CMP_LT : CMP_GT));
6086 /* Otherwise, see if the operands are both integers. */
6087 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
6088 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
6090 /* It would be nice if we really had a mode here. However, the
6091 largest int representable on the target is as good as
6092 infinite. */
6093 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
6094 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
6095 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
6097 if (wi::eq_p (ptrueop0, ptrueop1))
6098 return comparison_result (code, CMP_EQ);
6099 else
6101 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
6102 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
6103 return comparison_result (code, cr);
6107 /* Optimize comparisons with upper and lower bounds. */
6108 scalar_int_mode int_mode;
6109 if (CONST_INT_P (trueop1)
6110 && is_a <scalar_int_mode> (mode, &int_mode)
6111 && HWI_COMPUTABLE_MODE_P (int_mode)
6112 && !side_effects_p (trueop0))
6114 int sign;
6115 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
6116 HOST_WIDE_INT val = INTVAL (trueop1);
6117 HOST_WIDE_INT mmin, mmax;
6119 if (code == GEU
6120 || code == LEU
6121 || code == GTU
6122 || code == LTU)
6123 sign = 0;
6124 else
6125 sign = 1;
6127 /* Get a reduced range if the sign bit is zero. */
6128 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
6130 mmin = 0;
6131 mmax = nonzero;
6133 else
6135 rtx mmin_rtx, mmax_rtx;
6136 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
6138 mmin = INTVAL (mmin_rtx);
6139 mmax = INTVAL (mmax_rtx);
6140 if (sign)
6142 unsigned int sign_copies
6143 = num_sign_bit_copies (trueop0, int_mode);
6145 mmin >>= (sign_copies - 1);
6146 mmax >>= (sign_copies - 1);
6150 switch (code)
6152 /* x >= y is always true for y <= mmin, always false for y > mmax. */
6153 case GEU:
6154 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6155 return const_true_rtx;
6156 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6157 return const0_rtx;
6158 break;
6159 case GE:
6160 if (val <= mmin)
6161 return const_true_rtx;
6162 if (val > mmax)
6163 return const0_rtx;
6164 break;
6166 /* x <= y is always true for y >= mmax, always false for y < mmin. */
6167 case LEU:
6168 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6169 return const_true_rtx;
6170 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6171 return const0_rtx;
6172 break;
6173 case LE:
6174 if (val >= mmax)
6175 return const_true_rtx;
6176 if (val < mmin)
6177 return const0_rtx;
6178 break;
6180 case EQ:
6181 /* x == y is always false for y out of range. */
6182 if (val < mmin || val > mmax)
6183 return const0_rtx;
6184 break;
6186 /* x > y is always false for y >= mmax, always true for y < mmin. */
6187 case GTU:
6188 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6189 return const0_rtx;
6190 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6191 return const_true_rtx;
6192 break;
6193 case GT:
6194 if (val >= mmax)
6195 return const0_rtx;
6196 if (val < mmin)
6197 return const_true_rtx;
6198 break;
6200 /* x < y is always false for y <= mmin, always true for y > mmax. */
6201 case LTU:
6202 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6203 return const0_rtx;
6204 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6205 return const_true_rtx;
6206 break;
6207 case LT:
6208 if (val <= mmin)
6209 return const0_rtx;
6210 if (val > mmax)
6211 return const_true_rtx;
6212 break;
6214 case NE:
6215 /* x != y is always true for y out of range. */
6216 if (val < mmin || val > mmax)
6217 return const_true_rtx;
6218 break;
6220 default:
6221 break;
6225 /* Optimize integer comparisons with zero. */
6226 if (is_a <scalar_int_mode> (mode, &int_mode)
6227 && trueop1 == const0_rtx
6228 && !side_effects_p (trueop0))
6230 /* Some addresses are known to be nonzero. We don't know
6231 their sign, but equality comparisons are known. */
6232 if (nonzero_address_p (trueop0))
6234 if (code == EQ || code == LEU)
6235 return const0_rtx;
6236 if (code == NE || code == GTU)
6237 return const_true_rtx;
6240 /* See if the first operand is an IOR with a constant. If so, we
6241 may be able to determine the result of this comparison. */
6242 if (GET_CODE (op0) == IOR)
6244 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
6245 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
6247 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
6248 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
6249 && (UINTVAL (inner_const)
6250 & (HOST_WIDE_INT_1U
6251 << sign_bitnum)));
6253 switch (code)
6255 case EQ:
6256 case LEU:
6257 return const0_rtx;
6258 case NE:
6259 case GTU:
6260 return const_true_rtx;
6261 case LT:
6262 case LE:
6263 if (has_sign)
6264 return const_true_rtx;
6265 break;
6266 case GT:
6267 case GE:
6268 if (has_sign)
6269 return const0_rtx;
6270 break;
6271 default:
6272 break;
6278 /* Optimize comparison of ABS with zero. */
6279 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
6280 && (GET_CODE (trueop0) == ABS
6281 || (GET_CODE (trueop0) == FLOAT_EXTEND
6282 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
6284 switch (code)
6286 case LT:
6287 /* Optimize abs(x) < 0.0. */
6288 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
6289 return const0_rtx;
6290 break;
6292 case GE:
6293 /* Optimize abs(x) >= 0.0. */
6294 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
6295 return const_true_rtx;
6296 break;
6298 case UNGE:
6299 /* Optimize ! (abs(x) < 0.0). */
6300 return const_true_rtx;
6302 default:
6303 break;
6307 return 0;
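/* An illustrative, non-compiled sketch: constant folding and the
   bounds-based reasoning above.  SImode and the pseudo-register number
   are arbitrary examples.  */
#if 0
static void
example_const_relational (void)
{
  /* (gtu 3 -1) is false, because -1 is the unsigned maximum.  */
  rtx r1 = simplify_const_relational_operation (GTU, SImode,
						GEN_INT (3), constm1_rtx);
  /* R1 is const0_rtx.  */

  /* (geu x 0) is a tautology for any side-effect-free X.  */
  rtx x = gen_rtx_REG (SImode, 100);
  rtx r2 = simplify_const_relational_operation (GEU, SImode, x, const0_rtx);
  /* R2 is const_true_rtx.  */
}
#endif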
6310 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6311 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6312 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
6313 expression can be simplified to that, or NULL_RTX if not.
6314 Assume X is compared against zero with CMP_CODE and the true
6315 arm is TRUE_VAL and the false arm is FALSE_VAL. */
6317 rtx
6318 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
6319 rtx true_val, rtx false_val)
6321 if (cmp_code != EQ && cmp_code != NE)
6322 return NULL_RTX;
6324 /* Result for X == 0 and X != 0 respectively. */
6325 rtx on_zero, on_nonzero;
6326 if (cmp_code == EQ)
6328 on_zero = true_val;
6329 on_nonzero = false_val;
6331 else
6333 on_zero = false_val;
6334 on_nonzero = true_val;
6337 rtx_code op_code = GET_CODE (on_nonzero);
6338 if ((op_code != CLZ && op_code != CTZ)
6339 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
6340 || !CONST_INT_P (on_zero))
6341 return NULL_RTX;
6343 HOST_WIDE_INT op_val;
6344 scalar_int_mode mode ATTRIBUTE_UNUSED
6345 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
6346 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
6347 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
6348 && op_val == INTVAL (on_zero))
6349 return on_nonzero;
6351 return NULL_RTX;
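/* An illustrative, non-compiled sketch: the IF_THEN_ELSE shape recognized
   above.  The value 32 is only correct on targets whose
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode; SImode and the
   pseudo-register number are arbitrary examples.  */
#if 0
static void
example_cond_clz (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx cond = gen_rtx_EQ (SImode, x, const0_rtx);
  rtx clzx = gen_rtx_CLZ (SImode, x);
  /* (if_then_else (eq x 0) (const_int 32) (clz x)) simplifies to (clz x)
     when the target defines CLZ of zero to be 32.  */
  rtx r = simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
				      cond, GEN_INT (32), clzx);
}
#endif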
6354 /* Try to simplify X given that it appears within operand OP of a
6355 VEC_MERGE operation whose mask is MASK. X need not use the same
6356 vector mode as the VEC_MERGE, but it must have the same number of
6357 elements.
6359 Return the simplified X on success, otherwise return NULL_RTX. */
6361 rtx
6362 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
6364 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
6365 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
6366 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
6368 if (side_effects_p (XEXP (x, 1 - op)))
6369 return NULL_RTX;
6371 return XEXP (x, op);
6373 if (UNARY_P (x)
6374 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6375 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6377 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6378 if (top0)
6379 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6380 GET_MODE (XEXP (x, 0)));
6382 if (BINARY_P (x)
6383 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6384 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6385 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6386 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6388 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6389 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6390 if (top0 || top1)
6392 if (COMPARISON_P (x))
6393 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6394 GET_MODE (XEXP (x, 0)) != VOIDmode
6395 ? GET_MODE (XEXP (x, 0))
6396 : GET_MODE (XEXP (x, 1)),
6397 top0 ? top0 : XEXP (x, 0),
6398 top1 ? top1 : XEXP (x, 1));
6399 else
6400 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6401 top0 ? top0 : XEXP (x, 0),
6402 top1 ? top1 : XEXP (x, 1));
6405 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6406 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6407 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6408 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6409 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6410 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6411 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6413 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6414 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6415 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6416 if (top0 || top1 || top2)
6417 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6418 GET_MODE (XEXP (x, 0)),
6419 top0 ? top0 : XEXP (x, 0),
6420 top1 ? top1 : XEXP (x, 1),
6421 top2 ? top2 : XEXP (x, 2));
6423 return NULL_RTX;
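/* An illustrative, non-compiled sketch: pushing a VEC_MERGE mask into an
   operand.  V4SImode is assumed to exist on the target; the
   pseudo-register numbers and the mask value are arbitrary examples.  */
#if 0
static void
example_merge_mask (void)
{
  rtx x = gen_rtx_REG (V4SImode, 100);
  rtx y = gen_rtx_REG (V4SImode, 101);
  rtx z = gen_rtx_REG (V4SImode, 102);
  rtx mask = GEN_INT (5);	/* lanes 0 and 2 come from the first operand  */
  rtx inner = gen_rtx_VEC_MERGE (V4SImode, x, y, mask);
  /* (vec_merge (neg (vec_merge x y 5)) z 5): only lanes taken from X
     survive, so this becomes (vec_merge (neg x) z 5).  */
  rtx r = simplify_gen_ternary (VEC_MERGE, V4SImode, V4SImode,
				gen_rtx_NEG (V4SImode, inner), z, mask);
}
#endif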
6427 /* Simplify CODE, an operation with result mode MODE and three operands,
6428 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6429 a constant. Return 0 if no simplification is possible. */
6431 rtx
6432 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
6433 machine_mode op0_mode,
6434 rtx op0, rtx op1, rtx op2)
6436 bool any_change = false;
6437 rtx tem, trueop2;
6438 scalar_int_mode int_mode, int_op0_mode;
6439 unsigned int n_elts;
6441 switch (code)
6443 case FMA:
6444 /* Simplify negations around the multiplication. */
6445 /* -a * -b + c => a * b + c. */
6446 if (GET_CODE (op0) == NEG)
6448 tem = simplify_unary_operation (NEG, mode, op1, mode);
6449 if (tem)
6450 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6452 else if (GET_CODE (op1) == NEG)
6454 tem = simplify_unary_operation (NEG, mode, op0, mode);
6455 if (tem)
6456 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6459 /* Canonicalize the two multiplication operands. */
6460 /* a * -b + c => -b * a + c. */
6461 if (swap_commutative_operands_p (op0, op1))
6462 std::swap (op0, op1), any_change = true;
6464 if (any_change)
6465 return gen_rtx_FMA (mode, op0, op1, op2);
6466 return NULL_RTX;
6468 case SIGN_EXTRACT:
6469 case ZERO_EXTRACT:
6470 if (CONST_INT_P (op0)
6471 && CONST_INT_P (op1)
6472 && CONST_INT_P (op2)
6473 && is_a <scalar_int_mode> (mode, &int_mode)
6474 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6475 && HWI_COMPUTABLE_MODE_P (int_mode))
6477 /* Extracting a bit-field from a constant */
6478 unsigned HOST_WIDE_INT val = UINTVAL (op0);
6479 HOST_WIDE_INT op1val = INTVAL (op1);
6480 HOST_WIDE_INT op2val = INTVAL (op2);
6481 if (!BITS_BIG_ENDIAN)
6482 val >>= op2val;
6483 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6484 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6485 else
6486 /* Not enough information to calculate the bit position. */
6487 break;
6489 if (HOST_BITS_PER_WIDE_INT != op1val)
6491 /* First zero-extend. */
6492 val &= (HOST_WIDE_INT_1U << op1val) - 1;
6493 /* If desired, propagate sign bit. */
6494 if (code == SIGN_EXTRACT
6495 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6496 != 0)
6497 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6500 return gen_int_mode (val, int_mode);
6502 break;
6504 case IF_THEN_ELSE:
6505 if (CONST_INT_P (op0))
6506 return op0 != const0_rtx ? op1 : op2;
6508 /* Convert c ? a : a into "a". */
6509 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6510 return op1;
6512 /* Convert a != b ? a : b into "a". */
6513 if (GET_CODE (op0) == NE
6514 && ! side_effects_p (op0)
6515 && ! HONOR_NANS (mode)
6516 && ! HONOR_SIGNED_ZEROS (mode)
6517 && ((rtx_equal_p (XEXP (op0, 0), op1)
6518 && rtx_equal_p (XEXP (op0, 1), op2))
6519 || (rtx_equal_p (XEXP (op0, 0), op2)
6520 && rtx_equal_p (XEXP (op0, 1), op1))))
6521 return op1;
6523 /* Convert a == b ? a : b into "b". */
6524 if (GET_CODE (op0) == EQ
6525 && ! side_effects_p (op0)
6526 && ! HONOR_NANS (mode)
6527 && ! HONOR_SIGNED_ZEROS (mode)
6528 && ((rtx_equal_p (XEXP (op0, 0), op1)
6529 && rtx_equal_p (XEXP (op0, 1), op2))
6530 || (rtx_equal_p (XEXP (op0, 0), op2)
6531 && rtx_equal_p (XEXP (op0, 1), op1))))
6532 return op2;
6534 /* Convert (!c) != {0,...,0} ? a : b into
6535 c != {0,...,0} ? b : a for vector modes. */
6536 if (VECTOR_MODE_P (GET_MODE (op1))
6537 && GET_CODE (op0) == NE
6538 && GET_CODE (XEXP (op0, 0)) == NOT
6539 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6541 rtx cv = XEXP (op0, 1);
6542 int nunits;
6543 bool ok = true;
6544 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6545 ok = false;
6546 else
6547 for (int i = 0; i < nunits; ++i)
6548 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6550 ok = false;
6551 break;
6553 if (ok)
6555 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6556 XEXP (XEXP (op0, 0), 0),
6557 XEXP (op0, 1));
6558 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6559 return retval;
6563 /* Convert x == 0 ? N : clz (x) into clz (x) when
6564 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6565 Similarly for ctz (x). */
6566 if (COMPARISON_P (op0) && !side_effects_p (op0)
6567 && XEXP (op0, 1) == const0_rtx)
6569 rtx simplified
6570 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6571 op1, op2);
6572 if (simplified)
6573 return simplified;
6576 if (COMPARISON_P (op0) && ! side_effects_p (op0))
6578 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6579 ? GET_MODE (XEXP (op0, 1))
6580 : GET_MODE (XEXP (op0, 0)));
6581 rtx temp;
6583 /* Look for happy constants in op1 and op2. */
6584 if (CONST_INT_P (op1) && CONST_INT_P (op2))
6586 HOST_WIDE_INT t = INTVAL (op1);
6587 HOST_WIDE_INT f = INTVAL (op2);
6589 if (t == STORE_FLAG_VALUE && f == 0)
6590 code = GET_CODE (op0);
6591 else if (t == 0 && f == STORE_FLAG_VALUE)
6593 enum rtx_code tmp;
6594 tmp = reversed_comparison_code (op0, NULL);
6595 if (tmp == UNKNOWN)
6596 break;
6597 code = tmp;
6599 else
6600 break;
6602 return simplify_gen_relational (code, mode, cmp_mode,
6603 XEXP (op0, 0), XEXP (op0, 1));
6606 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6607 cmp_mode, XEXP (op0, 0),
6608 XEXP (op0, 1));
6610 /* See if any simplifications were possible. */
6611 if (temp)
6613 if (CONST_INT_P (temp))
6614 return temp == const0_rtx ? op2 : op1;
6615 else if (temp)
6616 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6619 break;
6621 case VEC_MERGE:
6622 gcc_assert (GET_MODE (op0) == mode);
6623 gcc_assert (GET_MODE (op1) == mode);
6624 gcc_assert (VECTOR_MODE_P (mode));
6625 trueop2 = avoid_constant_pool_reference (op2);
6626 if (CONST_INT_P (trueop2)
6627 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6629 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6630 unsigned HOST_WIDE_INT mask;
6631 if (n_elts == HOST_BITS_PER_WIDE_INT)
6632 mask = -1;
6633 else
6634 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6636 if (!(sel & mask) && !side_effects_p (op0))
6637 return op1;
6638 if ((sel & mask) == mask && !side_effects_p (op1))
6639 return op0;
6641 rtx trueop0 = avoid_constant_pool_reference (op0);
6642 rtx trueop1 = avoid_constant_pool_reference (op1);
6643 if (GET_CODE (trueop0) == CONST_VECTOR
6644 && GET_CODE (trueop1) == CONST_VECTOR)
6646 rtvec v = rtvec_alloc (n_elts);
6647 unsigned int i;
6649 for (i = 0; i < n_elts; i++)
6650 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6651 ? CONST_VECTOR_ELT (trueop0, i)
6652 : CONST_VECTOR_ELT (trueop1, i));
6653 return gen_rtx_CONST_VECTOR (mode, v);
6656 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6657 if no element from a appears in the result. */
6658 if (GET_CODE (op0) == VEC_MERGE)
6660 tem = avoid_constant_pool_reference (XEXP (op0, 2));
6661 if (CONST_INT_P (tem))
6663 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6664 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6665 return simplify_gen_ternary (code, mode, mode,
6666 XEXP (op0, 1), op1, op2);
6667 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6668 return simplify_gen_ternary (code, mode, mode,
6669 XEXP (op0, 0), op1, op2);
6672 if (GET_CODE (op1) == VEC_MERGE)
6674 tem = avoid_constant_pool_reference (XEXP (op1, 2));
6675 if (CONST_INT_P (tem))
6677 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6678 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6679 return simplify_gen_ternary (code, mode, mode,
6680 op0, XEXP (op1, 1), op2);
6681 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6682 return simplify_gen_ternary (code, mode, mode,
6683 op0, XEXP (op1, 0), op2);
6687 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6688 with a. */
6689 if (GET_CODE (op0) == VEC_DUPLICATE
6690 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6691 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6692 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6694 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6695 if (CONST_INT_P (tem) && CONST_INT_P (op2))
6697 if (XEXP (XEXP (op0, 0), 0) == op1
6698 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6699 return op1;
6702 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6703 (const_int N))
6704 with (vec_concat (X) (B)) if N == 1 or
6705 (vec_concat (A) (X)) if N == 2. */
6706 if (GET_CODE (op0) == VEC_DUPLICATE
6707 && GET_CODE (op1) == CONST_VECTOR
6708 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6709 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6710 && IN_RANGE (sel, 1, 2))
6712 rtx newop0 = XEXP (op0, 0);
6713 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6714 if (sel == 2)
6715 std::swap (newop0, newop1);
6716 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6718 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6719 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6720 Only applies for vectors of two elements. */
6721 if (GET_CODE (op0) == VEC_DUPLICATE
6722 && GET_CODE (op1) == VEC_CONCAT
6723 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6724 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6725 && IN_RANGE (sel, 1, 2))
6727 rtx newop0 = XEXP (op0, 0);
6728 rtx newop1 = XEXP (op1, 2 - sel);
6729 rtx otherop = XEXP (op1, sel - 1);
6730 if (sel == 2)
6731 std::swap (newop0, newop1);
6732 /* Don't want to throw away the other part of the vec_concat if
6733 it has side-effects. */
6734 if (!side_effects_p (otherop))
6735 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6738 /* Replace:
6740 (vec_merge:outer (vec_duplicate:outer x:inner)
6741 (subreg:outer y:inner 0)
6742 (const_int N))
6744 with (vec_concat:outer x:inner y:inner) if N == 1,
6745 or (vec_concat:outer y:inner x:inner) if N == 2.
6747 Implicitly, this means we have a paradoxical subreg, but such
6748 a check is cheap, so make it anyway.
6750 Only applies for vectors of two elements. */
6751 if (GET_CODE (op0) == VEC_DUPLICATE
6752 && GET_CODE (op1) == SUBREG
6753 && GET_MODE (op1) == GET_MODE (op0)
6754 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6755 && paradoxical_subreg_p (op1)
6756 && subreg_lowpart_p (op1)
6757 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6758 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6759 && IN_RANGE (sel, 1, 2))
6761 rtx newop0 = XEXP (op0, 0);
6762 rtx newop1 = SUBREG_REG (op1);
6763 if (sel == 2)
6764 std::swap (newop0, newop1);
6765 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6768 /* Same as above but with switched operands:
6769 Replace (vec_merge:outer (subreg:outer x:inner 0)
6770 (vec_duplicate:outer y:inner)
6771 (const_int N))
6773 with (vec_concat:outer x:inner y:inner) if N == 1,
6774 or (vec_concat:outer y:inner x:inner) if N == 2. */
6775 if (GET_CODE (op1) == VEC_DUPLICATE
6776 && GET_CODE (op0) == SUBREG
6777 && GET_MODE (op0) == GET_MODE (op1)
6778 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6779 && paradoxical_subreg_p (op0)
6780 && subreg_lowpart_p (op0)
6781 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6782 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6783 && IN_RANGE (sel, 1, 2))
6785 rtx newop0 = SUBREG_REG (op0);
6786 rtx newop1 = XEXP (op1, 0);
6787 if (sel == 2)
6788 std::swap (newop0, newop1);
6789 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6792 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6793 (const_int n))
6794 with (vec_concat x y) or (vec_concat y x) depending on value
6795 of N. */
6796 if (GET_CODE (op0) == VEC_DUPLICATE
6797 && GET_CODE (op1) == VEC_DUPLICATE
6798 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6799 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6800 && IN_RANGE (sel, 1, 2))
6802 rtx newop0 = XEXP (op0, 0);
6803 rtx newop1 = XEXP (op1, 0);
6804 if (sel == 2)
6805 std::swap (newop0, newop1);
6807 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6811 if (rtx_equal_p (op0, op1)
6812 && !side_effects_p (op2) && !side_effects_p (op1))
6813 return op0;
6815 if (!side_effects_p (op2))
6817 rtx top0
6818 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6819 rtx top1
6820 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6821 if (top0 || top1)
6822 return simplify_gen_ternary (code, mode, mode,
6823 top0 ? top0 : op0,
6824 top1 ? top1 : op1, op2);
6827 break;
6829 default:
6830 gcc_unreachable ();
6833 return 0;
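/* An illustrative, non-compiled sketch: the FMA canonicalization above.
   SFmode and the pseudo-register numbers are arbitrary examples.  */
#if 0
static void
example_fma_negation (void)
{
  rtx a = gen_rtx_REG (SFmode, 100);
  rtx b = gen_rtx_REG (SFmode, 101);
  rtx c = gen_rtx_REG (SFmode, 102);
  /* (fma (neg a) (neg b) c) drops both negations, since -a * -b + c
     is a * b + c.  */
  rtx r = simplify_ternary_operation (FMA, SFmode, SFmode,
				      gen_rtx_NEG (SFmode, a),
				      gen_rtx_NEG (SFmode, b), c);
  /* R is (fma:SF a b c).  */
}
#endif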
6836 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6837 starting at byte FIRST_BYTE. Return true on success and add the
6838 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6839 that the bytes follow target memory order. Leave BYTES unmodified
6840 on failure.
6842 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6843 BYTES before calling this function. */
6845 bool
6846 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6847 unsigned int first_byte, unsigned int num_bytes)
6849 /* Check the mode is sensible. */
6850 gcc_assert (GET_MODE (x) == VOIDmode
6851 ? is_a <scalar_int_mode> (mode)
6852 : mode == GET_MODE (x));
6854 if (GET_CODE (x) == CONST_VECTOR)
6856 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6857 is necessary. The only complication is that MODE_VECTOR_BOOL
6858 vectors can have several elements per byte. */
6859 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6860 GET_MODE_NUNITS (mode));
6861 unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6862 if (elt_bits < BITS_PER_UNIT)
6864 /* This is the only case in which elements can be smaller than
6865 a byte. */
6866 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6867 for (unsigned int i = 0; i < num_bytes; ++i)
6869 target_unit value = 0;
6870 for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6872 value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6873 elt += 1;
6875 bytes.quick_push (value);
6877 return true;
6880 unsigned int start = bytes.length ();
6881 unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6882 /* Make FIRST_BYTE relative to ELT. */
6883 first_byte %= elt_bytes;
6884 while (num_bytes > 0)
6886 /* Work out how many bytes we want from element ELT. */
6887 unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6888 if (!native_encode_rtx (GET_MODE_INNER (mode),
6889 CONST_VECTOR_ELT (x, elt), bytes,
6890 first_byte, chunk_bytes))
6892 bytes.truncate (start);
6893 return false;
6895 elt += 1;
6896 first_byte = 0;
6897 num_bytes -= chunk_bytes;
6899 return true;
6902 /* All subsequent cases are limited to scalars. */
6903 scalar_mode smode;
6904 if (!is_a <scalar_mode> (mode, &smode))
6905 return false;
6907 /* Make sure that the region is in range. */
6908 unsigned int end_byte = first_byte + num_bytes;
6909 unsigned int mode_bytes = GET_MODE_SIZE (smode);
6910 gcc_assert (end_byte <= mode_bytes);
6912 if (CONST_SCALAR_INT_P (x))
6914 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6915 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6916 position of each byte. */
6917 rtx_mode_t value (x, smode);
6918 wide_int_ref value_wi (value);
6919 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6921 /* Always constant because the inputs are. */
6922 unsigned int lsb
6923 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6924 /* Operate directly on the encoding rather than using
6925 wi::extract_uhwi, so that we preserve the sign or zero
6926 extension for modes that are not a whole number of bits in
6927 size. (Zero extension is only used for the combination of
6928 innermode == BImode && STORE_FLAG_VALUE == 1). */
6929 unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6930 unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6931 unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6932 bytes.quick_push (uhwi >> shift);
6934 return true;
6937 if (CONST_DOUBLE_P (x))
6939 /* real_to_target produces an array of integers in target memory order.
6940 All integers before the last one have 32 bits; the last one may
6941 have 32 bits or fewer, depending on whether the mode bitsize
6942 is divisible by 32. Each of these integers is then laid out
6943 in target memory as any other integer would be. */
6944 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6945 real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6947 /* The (maximum) number of target bytes per element of el32. */
6948 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6949 gcc_assert (bytes_per_el32 != 0);
6951 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6952 handling above. */
6953 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6955 unsigned int index = byte / bytes_per_el32;
6956 unsigned int subbyte = byte % bytes_per_el32;
6957 unsigned int int_bytes = MIN (bytes_per_el32,
6958 mode_bytes - index * bytes_per_el32);
6959 /* Always constant because the inputs are. */
6960 unsigned int lsb
6961 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6962 bytes.quick_push ((unsigned long) el32[index] >> lsb);
6964 return true;
6967 if (GET_CODE (x) == CONST_FIXED)
6969 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6971 /* Always constant because the inputs are. */
6972 unsigned int lsb
6973 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6974 unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6975 if (lsb >= HOST_BITS_PER_WIDE_INT)
6977 lsb -= HOST_BITS_PER_WIDE_INT;
6978 piece = CONST_FIXED_VALUE_HIGH (x);
6980 bytes.quick_push (piece >> lsb);
6982 return true;
6985 return false;
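/* An illustrative, non-compiled sketch: encoding an SImode constant into
   target bytes.  The constant value is an arbitrary example.  */
#if 0
static void
example_native_encode (void)
{
  auto_vec<target_unit, 128> bytes (4);
  /* For 0x01020304, a little-endian target yields 04 03 02 01 and a
     big-endian target yields 01 02 03 04.  */
  if (native_encode_rtx (SImode, gen_int_mode (0x01020304, SImode),
			 bytes, 0, 4))
    {
      /* bytes[0] is 0x04 or 0x01, depending on BYTES_BIG_ENDIAN.  */
    }
}
#endif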
6988 /* Read a vector of mode MODE from the target memory image given by BYTES,
6989 starting at byte FIRST_BYTE. The vector is known to be encodable using
6990 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6991 and BYTES is known to have enough bytes to supply NPATTERNS *
6992 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6993 BITS_PER_UNIT bits and the bytes are in target memory order.
6995 Return the vector on success, otherwise return NULL_RTX. */
6997 rtx
6998 native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
6999 unsigned int first_byte, unsigned int npatterns,
7000 unsigned int nelts_per_pattern)
7002 rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
7004 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7005 GET_MODE_NUNITS (mode));
7006 if (elt_bits < BITS_PER_UNIT)
7008 /* This is the only case in which elements can be smaller than a byte.
7009 Element 0 is always in the lsb of the containing byte. */
7010 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
7011 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7013 unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
7014 unsigned int byte_index = bit_index / BITS_PER_UNIT;
7015 unsigned int lsb = bit_index % BITS_PER_UNIT;
7016 builder.quick_push (bytes[byte_index] & (1 << lsb)
7017 ? CONST1_RTX (BImode)
7018 : CONST0_RTX (BImode));
7021 else
7023 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7025 rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
7026 if (!x)
7027 return NULL_RTX;
7028 builder.quick_push (x);
7029 first_byte += elt_bits / BITS_PER_UNIT;
7032 return builder.build ();
7035 /* Read an rtx of mode MODE from the target memory image given by BYTES,
7036 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
7037 bits and the bytes are in target memory order. The image has enough
7038 values to specify all bytes of MODE.
7040 Return the rtx on success, otherwise return NULL_RTX. */
7042 rtx
7043 native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
7044 unsigned int first_byte)
7046 if (VECTOR_MODE_P (mode))
7048 /* If we know at compile time how many elements there are,
7049 pull each element directly from BYTES. */
7050 unsigned int nelts;
7051 if (GET_MODE_NUNITS (mode).is_constant (&nelts))
7052 return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
7053 return NULL_RTX;
7056 scalar_int_mode imode;
7057 if (is_a <scalar_int_mode> (mode, &imode)
7058 && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
7060 /* Pull the bytes msb first, so that we can use simple
7061 shift-and-insert wide_int operations. */
7062 unsigned int size = GET_MODE_SIZE (imode);
7063 wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
7064 for (unsigned int i = 0; i < size; ++i)
7066 unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
7067 /* Always constant because the inputs are. */
7068 unsigned int subbyte
7069 = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
7070 result <<= BITS_PER_UNIT;
7071 result |= bytes[first_byte + subbyte];
7073 return immed_wide_int_const (result, imode);
7076 scalar_float_mode fmode;
7077 if (is_a <scalar_float_mode> (mode, &fmode))
7079 /* We need to build an array of integers in target memory order.
7080 All integers before the last one have 32 bits; the last one may
7081 have 32 bits or fewer, depending on whether the mode bitsize
7082 is divisible by 32. */
7083 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
7084 unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
7085 memset (el32, 0, num_el32 * sizeof (long));
7087 /* The (maximum) number of target bytes per element of el32. */
7088 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
7089 gcc_assert (bytes_per_el32 != 0);
7091 unsigned int mode_bytes = GET_MODE_SIZE (fmode);
7092 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7094 unsigned int index = byte / bytes_per_el32;
7095 unsigned int subbyte = byte % bytes_per_el32;
7096 unsigned int int_bytes = MIN (bytes_per_el32,
7097 mode_bytes - index * bytes_per_el32);
7098 /* Always constant because the inputs are. */
7099 unsigned int lsb
7100 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
7101 el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
7103 REAL_VALUE_TYPE r;
7104 real_from_target (&r, el32, fmode);
7105 return const_double_from_real_value (r, fmode);
7108 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
7110 scalar_mode smode = as_a <scalar_mode> (mode);
7111 FIXED_VALUE_TYPE f;
7112 f.data.low = 0;
7113 f.data.high = 0;
7114 f.mode = smode;
7116 unsigned int mode_bytes = GET_MODE_SIZE (smode);
7117 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7119 /* Always constant because the inputs are. */
7120 unsigned int lsb
7121 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7122 unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
7123 if (lsb >= HOST_BITS_PER_WIDE_INT)
7124 f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
7125 else
7126 f.data.low |= unit << lsb;
7128 return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
7131 return NULL_RTX;
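/* An illustrative, non-compiled sketch: decoding two target bytes as an
   HImode integer.  The byte values are arbitrary examples.  */
#if 0
static void
example_native_decode (void)
{
  auto_vec<target_unit, 128> bytes (2);
  bytes.quick_push (0x12);
  bytes.quick_push (0x34);
  /* On a big-endian target X is (const_int 0x1234); on a little-endian
     target it is (const_int 0x3412).  */
  rtx x = native_decode_rtx (HImode, bytes, 0);
}
#endif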
7134 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
7135 is to convert a runtime BYTE value into a constant one. */
7137 static poly_uint64
7138 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
7140 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7141 machine_mode mode = GET_MODE (x);
7142 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7143 GET_MODE_NUNITS (mode));
7144 /* The number of bits needed to encode one element from each pattern. */
7145 unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
7147 /* Identify the start point in terms of a sequence number and a byte offset
7148 within that sequence. */
7149 poly_uint64 first_sequence;
7150 unsigned HOST_WIDE_INT subbit;
7151 if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
7152 &first_sequence, &subbit))
7154 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7155 if (nelts_per_pattern == 1)
7156 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7157 doesn't matter. */
7158 byte = subbit / BITS_PER_UNIT;
7159 else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
7161 /* The subreg drops the first element from each pattern and
7162 only uses the second element. Find the first sequence
7163 that starts on a byte boundary. */
7164 subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
7165 byte = subbit / BITS_PER_UNIT;
7168 return byte;
7171 /* Subroutine of simplify_subreg in which:
7173 - X is known to be a CONST_VECTOR
7174 - OUTERMODE is known to be a vector mode
7176 Try to handle the subreg by operating on the CONST_VECTOR encoding
7177 rather than on each individual element of the CONST_VECTOR.
7179 Return the simplified subreg on success, otherwise return NULL_RTX. */
7181 static rtx
7182 simplify_const_vector_subreg (machine_mode outermode, rtx x,
7183 machine_mode innermode, unsigned int first_byte)
7185 /* Paradoxical subregs of vectors have dubious semantics. */
7186 if (paradoxical_subreg_p (outermode, innermode))
7187 return NULL_RTX;
7189 /* We can only preserve the semantics of a stepped pattern if the new
7190 vector element mode is the same as the original one. */
7191 if (CONST_VECTOR_STEPPED_P (x)
7192 && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
7193 return NULL_RTX;
7195 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7196 unsigned int x_elt_bits
7197 = vector_element_size (GET_MODE_BITSIZE (innermode),
7198 GET_MODE_NUNITS (innermode));
7199 unsigned int out_elt_bits
7200 = vector_element_size (GET_MODE_BITSIZE (outermode),
7201 GET_MODE_NUNITS (outermode));
7203 /* The number of bits needed to encode one element from every pattern
7204 of the original vector. */
7205 unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
7207 /* The number of bits needed to encode one element from every pattern
7208 of the result. */
7209 unsigned int out_sequence_bits
7210 = least_common_multiple (x_sequence_bits, out_elt_bits);
7212 /* Work out the number of interleaved patterns in the output vector
7213 and the number of encoded elements per pattern. */
7214 unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
7215 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7217 /* The encoding scheme requires the number of elements to be a multiple
7218 of the number of patterns, so that each pattern appears at least once
7219 and so that the same number of elements appear from each pattern. */
7220 bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
7221 unsigned int const_nunits;
7222 if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
7223 && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
7225 /* Either the encoding is invalid, or applying it would give us
7226 more elements than we need. Just encode each element directly. */
7227 out_npatterns = const_nunits;
7228 nelts_per_pattern = 1;
7230 else if (!ok_p)
7231 return NULL_RTX;
7233 /* Get enough bytes of X to form the new encoding. */
7234 unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
7235 unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
7236 auto_vec<target_unit, 128> buffer (buffer_bytes);
7237 if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
7238 return NULL_RTX;
7240 /* Reencode the bytes as OUTERMODE. */
7241 return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
7242 nelts_per_pattern);
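/* An illustrative, non-compiled sketch: a constant vector subreg handled
   by the encoding-based path above.  V4SImode and V2SImode are assumed to
   exist on the target; the element values are arbitrary examples.  */
#if 0
static void
example_const_vector_subreg (void)
{
  rtvec v = gen_rtvec (4, GEN_INT (1), GEN_INT (2), GEN_INT (3), GEN_INT (4));
  rtx cv = gen_rtx_CONST_VECTOR (V4SImode, v);
  /* Bytes 8..15 hold elements 2 and 3 in target memory order, so the
     V2SImode subreg at byte 8 is the constant vector { 3, 4 }.  */
  rtx half = simplify_subreg (V2SImode, cv, V4SImode, 8);
}
#endif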
7245 /* Try to simplify a subreg of a constant by encoding the subreg region
7246 as a sequence of target bytes and reading them back in the new mode.
7247 Return the new value on success, otherwise return null.
7249 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7250 and byte offset FIRST_BYTE. */
7252 static rtx
7253 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
7254 machine_mode innermode, unsigned int first_byte)
7256 unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
7257 auto_vec<target_unit, 128> buffer (buffer_bytes);
7259 /* Some ports misuse CCmode. */
7260 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
7261 return x;
7263 /* Paradoxical subregs read undefined values for bytes outside of the
7264 inner value. However, we have traditionally always sign-extended
7265 integer constants and zero-extended others. */
7266 unsigned int inner_bytes = buffer_bytes;
7267 if (paradoxical_subreg_p (outermode, innermode))
7269 if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
7270 return NULL_RTX;
7272 target_unit filler = 0;
7273 if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
7274 filler = -1;
7276 /* Add any leading bytes due to big-endian layout. The number of
7277 bytes must be constant because both modes have constant size. */
7278 unsigned int leading_bytes
7279 = -byte_lowpart_offset (outermode, innermode).to_constant ();
7280 for (unsigned int i = 0; i < leading_bytes; ++i)
7281 buffer.quick_push (filler);
7283 if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7284 return NULL_RTX;
7286 /* Add any trailing bytes due to little-endian layout. */
7287 while (buffer.length () < buffer_bytes)
7288 buffer.quick_push (filler);
7290 else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7291 return NULL_RTX;
7292 rtx ret = native_decode_rtx (outermode, buffer, 0);
7293 if (ret && MODE_COMPOSITE_P (outermode))
7295 auto_vec<target_unit, 128> buffer2 (buffer_bytes);
7296 if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
7297 return NULL_RTX;
7298 for (unsigned int i = 0; i < buffer_bytes; ++i)
7299 if (buffer[i] != buffer2[i])
7300 return NULL_RTX;
7302 return ret;
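/* An illustrative, non-compiled sketch: reading the low byte of an SImode
   constant through the byte-encoding path above.  The constant value is
   an arbitrary example.  */
#if 0
static void
example_immed_subreg (void)
{
  rtx c = gen_int_mode (0x12345678, SImode);
  /* The lowpart byte offset is 0 on little-endian targets and 3 on
     big-endian targets; either way the result is (const_int 0x78).  */
  rtx lo = simplify_subreg (QImode, c, SImode,
			    subreg_lowpart_offset (QImode, SImode));
}
#endif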
7305 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7306 Return 0 if no simplifications are possible. */
7307 rtx
7308 simplify_context::simplify_subreg (machine_mode outermode, rtx op,
7309 machine_mode innermode, poly_uint64 byte)
7311 /* Little bit of sanity checking. */
7312 gcc_assert (innermode != VOIDmode);
7313 gcc_assert (outermode != VOIDmode);
7314 gcc_assert (innermode != BLKmode);
7315 gcc_assert (outermode != BLKmode);
7317 gcc_assert (GET_MODE (op) == innermode
7318 || GET_MODE (op) == VOIDmode);
7320 poly_uint64 outersize = GET_MODE_SIZE (outermode);
7321 if (!multiple_p (byte, outersize))
7322 return NULL_RTX;
7324 poly_uint64 innersize = GET_MODE_SIZE (innermode);
7325 if (maybe_ge (byte, innersize))
7326 return NULL_RTX;
7328 if (outermode == innermode && known_eq (byte, 0U))
7329 return op;
7331 if (GET_CODE (op) == CONST_VECTOR)
7332 byte = simplify_const_vector_byte_offset (op, byte);
7334 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
7336 rtx elt;
7338 if (VECTOR_MODE_P (outermode)
7339 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
7340 && vec_duplicate_p (op, &elt))
7341 return gen_vec_duplicate (outermode, elt);
7343 if (outermode == GET_MODE_INNER (innermode)
7344 && vec_duplicate_p (op, &elt))
7345 return elt;
7348 if (CONST_SCALAR_INT_P (op)
7349 || CONST_DOUBLE_AS_FLOAT_P (op)
7350 || CONST_FIXED_P (op)
7351 || GET_CODE (op) == CONST_VECTOR)
7353 unsigned HOST_WIDE_INT cbyte;
7354 if (byte.is_constant (&cbyte))
7356 if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
7358 rtx tmp = simplify_const_vector_subreg (outermode, op,
7359 innermode, cbyte);
7360 if (tmp)
7361 return tmp;
7364 fixed_size_mode fs_outermode;
7365 if (is_a <fixed_size_mode> (outermode, &fs_outermode))
7366 return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
7370 /* Changing mode twice with SUBREG => just change it once,
7371 or not at all if changing back to op's starting mode. */
7372 if (GET_CODE (op) == SUBREG)
7374 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7375 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7376 rtx newx;
7378 if (outermode == innermostmode
7379 && known_eq (byte, 0U)
7380 && known_eq (SUBREG_BYTE (op), 0))
7381 return SUBREG_REG (op);
7383 /* Work out the memory offset of the final OUTERMODE value relative
7384 to the inner value of OP. */
7385 poly_int64 mem_offset = subreg_memory_offset (outermode,
7386 innermode, byte);
7387 poly_int64 op_mem_offset = subreg_memory_offset (op);
7388 poly_int64 final_offset = mem_offset + op_mem_offset;
7390 /* See whether resulting subreg will be paradoxical. */
7391 if (!paradoxical_subreg_p (outermode, innermostmode))
7393 /* Bail out in case resulting subreg would be incorrect. */
7394 if (maybe_lt (final_offset, 0)
7395 || maybe_ge (poly_uint64 (final_offset), innermostsize)
7396 || !multiple_p (final_offset, outersize))
7397 return NULL_RTX;
7399 else
7401 poly_int64 required_offset = subreg_memory_offset (outermode,
7402 innermostmode, 0);
7403 if (maybe_ne (final_offset, required_offset))
7404 return NULL_RTX;
7405 /* Paradoxical subregs always have byte offset 0. */
7406 final_offset = 0;
7409 /* Recurse for further possible simplifications. */
7410 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7411 final_offset);
7412 if (newx)
7413 return newx;
7414 if (validate_subreg (outermode, innermostmode,
7415 SUBREG_REG (op), final_offset))
7417 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7418 if (SUBREG_PROMOTED_VAR_P (op)
7419 && SUBREG_PROMOTED_SIGN (op) >= 0
7420 && GET_MODE_CLASS (outermode) == MODE_INT
7421 && known_ge (outersize, innersize)
7422 && known_le (outersize, innermostsize)
7423 && subreg_lowpart_p (newx))
7425 SUBREG_PROMOTED_VAR_P (newx) = 1;
7426 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7428 return newx;
7430 return NULL_RTX;
7433 /* SUBREG of a hard register => just change the register number
7434 and/or mode. If the hard register is not valid in that mode,
7435 suppress this simplification. If the hard register is the stack,
7436 frame, or argument pointer, leave this as a SUBREG. */
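/* As an illustrative, target-dependent example: if hard register 1 is a
   DImode register that is also valid in SImode, then on a little-endian
   target (subreg:SI (reg:DI 1) 0) can typically be rewritten as (reg:SI 1);
   simplify_subreg_regno works out the register number for other offsets
   and modes.  */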
7438 if (REG_P (op) && HARD_REGISTER_P (op))
7440 unsigned int regno, final_regno;
7442 regno = REGNO (op);
7443 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7444 if (HARD_REGISTER_NUM_P (final_regno))
7446 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7447 subreg_memory_offset (outermode,
7448 innermode, byte));
7450 /* Propagate the original regno. We don't have any way to specify
7451 the offset inside the original regno, so do so only for the lowpart.
7452 The information is used only by alias analysis, which cannot
7453 grok partial registers anyway. */
7455 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7456 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7457 return x;
7461 /* If we have a SUBREG of a register that we are replacing and we are
7462 replacing it with a MEM, make a new MEM and try replacing the
7463 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7464 or if we would be widening it. */
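/* E.g. (subreg:SI (mem:DI addr) 4) can usually be rewritten as a narrower
   MEM at addr + 4; adjust_address_nv below builds that narrower MEM with
   suitably adjusted attributes (illustrative only; the offset handling is
   endian-dependent).  */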
7466 if (MEM_P (op)
7467 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7468 /* Allow splitting of volatile memory references in case we don't
7469 have an instruction to move the whole thing. */
7470 && (! MEM_VOLATILE_P (op)
7471 || ! have_insn_for (SET, innermode))
7472 && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
7473 && known_le (outersize, innersize))
7474 return adjust_address_nv (op, outermode, byte);
7476 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7477 of two parts. */
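/* For a complex value, e.g. (concat:SC (reg:SF re) (reg:SF im)),
   a subreg at byte 0 selects RE and a subreg at byte 4 selects IM,
   after which we recurse on the chosen part (illustrative only).  */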
7478 if (GET_CODE (op) == CONCAT
7479 || GET_CODE (op) == VEC_CONCAT)
7481 poly_uint64 final_offset;
7482 rtx part, res;
7484 machine_mode part_mode = GET_MODE (XEXP (op, 0));
7485 if (part_mode == VOIDmode)
7486 part_mode = GET_MODE_INNER (GET_MODE (op));
7487 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7488 if (known_lt (byte, part_size))
7490 part = XEXP (op, 0);
7491 final_offset = byte;
7493 else if (known_ge (byte, part_size))
7495 part = XEXP (op, 1);
7496 final_offset = byte - part_size;
7498 else
7499 return NULL_RTX;
7501 if (maybe_gt (final_offset + outersize, part_size))
7502 return NULL_RTX;
7504 part_mode = GET_MODE (part);
7505 if (part_mode == VOIDmode)
7506 part_mode = GET_MODE_INNER (GET_MODE (op));
7507 res = simplify_subreg (outermode, part, part_mode, final_offset);
7508 if (res)
7509 return res;
7510 if (validate_subreg (outermode, part_mode, part, final_offset))
7511 return gen_rtx_SUBREG (outermode, part, final_offset);
7512 return NULL_RTX;
7515 /* Simplify
7516 (subreg (vec_merge (X)
7517 (vector)
7518 (const_int ((1 << N) | M)))
7519 (N * sizeof (outermode)))
7520 to
7521 (subreg (X) (N * sizeof (outermode))). */
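/* For example, with OUTERMODE == SImode and INNERMODE == V4SImode,
   byte offset 8 corresponds to element N == 2; if bit 2 of the VEC_MERGE
   mask is set, that element comes from X, so the subreg can be taken of X
   directly (illustrative only).  */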
7523 unsigned int idx;
7524 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7525 && idx < HOST_BITS_PER_WIDE_INT
7526 && GET_CODE (op) == VEC_MERGE
7527 && GET_MODE_INNER (innermode) == outermode
7528 && CONST_INT_P (XEXP (op, 2))
7529 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7530 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7532 /* A SUBREG resulting from a zero extension may fold to zero if
7533 it extracts higher bits than the ZERO_EXTEND's source bits. */
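/* E.g. on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4)
   reads only bits the extension is known to have zeroed, so it folds to
   (const_int 0) (illustrative only).  */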
7534 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7536 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7537 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7538 return CONST0_RTX (outermode);
7541 scalar_int_mode int_outermode, int_innermode;
7542 if (is_a <scalar_int_mode> (outermode, &int_outermode)
7543 && is_a <scalar_int_mode> (innermode, &int_innermode)
7544 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7546 /* Handle polynomial integers. The upper bits of a paradoxical
7547 subreg are undefined, so this is safe regardless of whether
7548 we're truncating or extending. */
7549 if (CONST_POLY_INT_P (op))
7551 poly_wide_int val
7552 = poly_wide_int::from (const_poly_int_value (op),
7553 GET_MODE_PRECISION (int_outermode),
7554 SIGNED);
7555 return immed_wide_int_const (val, int_outermode);
7558 if (GET_MODE_PRECISION (int_outermode)
7559 < GET_MODE_PRECISION (int_innermode))
7561 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7562 if (tem)
7563 return tem;
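/* For instance, the QImode lowpart of (zero_extend:SI (reg:QI x)) is
   simplified by simplify_truncation straight back to (reg:QI x)
   (illustrative only).  */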
7567 /* If OP is a vector comparison and the subreg is not changing the
7568 number of elements or the size of the elements, change the result
7569 of the comparison to the new mode. */
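/* E.g. (subreg:V4SI (eq:V4SF (reg:V4SF a) (reg:V4SF b)) 0) can become
   (eq:V4SI (reg:V4SF a) (reg:V4SF b)), since both vector modes have
   four four-byte elements (illustrative only).  */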
7570 if (COMPARISON_P (op)
7571 && VECTOR_MODE_P (outermode)
7572 && VECTOR_MODE_P (innermode)
7573 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7574 && known_eq (GET_MODE_UNIT_SIZE (outermode),
7575 GET_MODE_UNIT_SIZE (innermode)))
7576 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7577 XEXP (op, 0), XEXP (op, 1));
7578 return NULL_RTX;
7581 /* Make a SUBREG operation or equivalent if it folds. */
7584 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
7585 machine_mode innermode,
7586 poly_uint64 byte)
7588 rtx newx;
7590 newx = simplify_subreg (outermode, op, innermode, byte);
7591 if (newx)
7592 return newx;
7594 if (GET_CODE (op) == SUBREG
7595 || GET_CODE (op) == CONCAT
7596 || GET_MODE (op) == VOIDmode)
7597 return NULL_RTX;
7599 if (MODE_COMPOSITE_P (outermode)
7600 && (CONST_SCALAR_INT_P (op)
7601 || CONST_DOUBLE_AS_FLOAT_P (op)
7602 || CONST_FIXED_P (op)
7603 || GET_CODE (op) == CONST_VECTOR))
7604 return NULL_RTX;
7606 if (validate_subreg (outermode, innermode, op, byte))
7607 return gen_rtx_SUBREG (outermode, op, byte);
7609 return NULL_RTX;
7612 /* Generate a subreg to get the least significant part of EXPR (in mode
7613 INNER_MODE) in OUTER_MODE. */
7616 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
7617 machine_mode inner_mode)
7619 return simplify_gen_subreg (outer_mode, expr, inner_mode,
7620 subreg_lowpart_offset (outer_mode, inner_mode));
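/* A minimal usage sketch (illustrative only): lowpart_subreg (QImode, x,
   SImode) asks for the least significant byte of X, which sits at byte
   offset 0 on little-endian targets and byte offset 3 on big-endian
   targets.  */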
7623 /* Simplify X, an rtx expression.
7625 Return the simplified expression or NULL if no simplifications
7626 were possible.
7628 This is the preferred entry point into the simplification routines;
7629 however, we still allow passes to call the more specific routines.
7631 Right now GCC has three (yes, three) major bodies of RTL simplification
7632 code that need to be unified.
7634 1. fold_rtx in cse.c. This code uses various CSE specific
7635 information to aid in RTL simplification.
7637 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7638 it uses combine specific information to aid in RTL
7639 simplification.
7641 3. The routines in this file.
7644 Long term we want to only have one body of simplification code; to
7645 get to that state I recommend the following steps:
7647 1. Pore over fold_rtx & simplify_rtx and move any simplifications
7648 which do not depend on pass-specific state into these routines.
7650 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7651 use this routine whenever possible.
7653 3. Allow for pass dependent state to be provided to these
7654 routines and add simplifications based on the pass dependent
7655 state. Remove code from cse.c & combine.c that becomes
7656 redundant/dead.
7658 It will take time, but ultimately the compiler will be easier to
7659 maintain and improve. It's totally silly that when we add a
7660 simplification it needs to be added to 4 places (3 for RTL
7661 simplification and 1 for tree simplification). */
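/* As a small illustration of this entry point: simplify_rtx applied to
   (plus:SI (const_int 1) (const_int 1)) dispatches to
   simplify_binary_operation and yields (const_int 2), while an rtx it
   cannot improve simply returns NULL.  */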
7664 simplify_rtx (const_rtx x)
7666 const enum rtx_code code = GET_CODE (x);
7667 const machine_mode mode = GET_MODE (x);
7669 switch (GET_RTX_CLASS (code))
7671 case RTX_UNARY:
7672 return simplify_unary_operation (code, mode,
7673 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7674 case RTX_COMM_ARITH:
7675 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7676 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7678 /* Fall through. */
7680 case RTX_BIN_ARITH:
7681 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7683 case RTX_TERNARY:
7684 case RTX_BITFIELD_OPS:
7685 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7686 XEXP (x, 0), XEXP (x, 1),
7687 XEXP (x, 2));
7689 case RTX_COMPARE:
7690 case RTX_COMM_COMPARE:
7691 return simplify_relational_operation (code, mode,
7692 ((GET_MODE (XEXP (x, 0))
7693 != VOIDmode)
7694 ? GET_MODE (XEXP (x, 0))
7695 : GET_MODE (XEXP (x, 1))),
7696 XEXP (x, 0),
7697 XEXP (x, 1));
7699 case RTX_EXTRA:
7700 if (code == SUBREG)
7701 return simplify_subreg (mode, SUBREG_REG (x),
7702 GET_MODE (SUBREG_REG (x)),
7703 SUBREG_BYTE (x));
7704 break;
7706 case RTX_OBJ:
7707 if (code == LO_SUM)
7709 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7710 if (GET_CODE (XEXP (x, 0)) == HIGH
7711 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7712 return XEXP (x, 1);
7714 break;
7716 default:
7717 break;
7719 return NULL;
7722 #if CHECKING_P
7724 namespace selftest {
7726 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7728 static rtx
7729 make_test_reg (machine_mode mode)
7731 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7733 return gen_rtx_REG (mode, test_reg_num++);
7736 static void
7737 test_scalar_int_ops (machine_mode mode)
7739 rtx op0 = make_test_reg (mode);
7740 rtx op1 = make_test_reg (mode);
7741 rtx six = GEN_INT (6);
7743 rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7744 rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7745 rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7747 rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7748 rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7749 rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7751 rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7752 rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7754 /* Test some binary identities. */
7755 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7756 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7757 ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7758 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7759 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7760 ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7761 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7762 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7763 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7764 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7765 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7766 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7767 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7768 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7769 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7770 ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7771 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7773 /* Test some self-inverse operations. */
7774 ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7775 ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7776 ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7778 /* Test some operations on identical operands. */
7779 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7780 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7781 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7782 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7783 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7784 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7786 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7787 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7789 /* Test simplify_distributive_operation. */
7790 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7791 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7792 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7793 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7794 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7795 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7797 /* Test that useless extensions are eliminated. */
7798 ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
7799 ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
7800 ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
7801 ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
7804 /* Verify some simplifications of integer extension/truncation.
7805 Machine mode BMODE is guaranteed to be wider than SMODE. */
7807 static void
7808 test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
7810 rtx sreg = make_test_reg (smode);
7812 /* Check truncation of extension. */
7813 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7814 simplify_gen_unary (ZERO_EXTEND, bmode,
7815 sreg, smode),
7816 bmode),
7817 sreg);
7818 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7819 simplify_gen_unary (SIGN_EXTEND, bmode,
7820 sreg, smode),
7821 bmode),
7822 sreg);
7823 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7824 lowpart_subreg (bmode, sreg, smode),
7825 bmode),
7826 sreg);
7829 /* Verify more simplifications of integer extension/truncation.
7830 BMODE is wider than MMODE, which is wider than SMODE. */
7832 static void
7833 test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
7834 machine_mode smode)
7836 rtx breg = make_test_reg (bmode);
7837 rtx mreg = make_test_reg (mmode);
7838 rtx sreg = make_test_reg (smode);
7840 /* Check truncate of truncate. */
7841 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7842 simplify_gen_unary (TRUNCATE, mmode,
7843 breg, bmode),
7844 mmode),
7845 simplify_gen_unary (TRUNCATE, smode, breg, bmode));
7847 /* Check extension of extension. */
7848 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
7849 simplify_gen_unary (ZERO_EXTEND, mmode,
7850 sreg, smode),
7851 mmode),
7852 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7853 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7854 simplify_gen_unary (SIGN_EXTEND, mmode,
7855 sreg, smode),
7856 mmode),
7857 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
7858 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7859 simplify_gen_unary (ZERO_EXTEND, mmode,
7860 sreg, smode),
7861 mmode),
7862 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7864 /* Check truncation of extension. */
7865 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7866 simplify_gen_unary (ZERO_EXTEND, bmode,
7867 mreg, mmode),
7868 bmode),
7869 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7870 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7871 simplify_gen_unary (SIGN_EXTEND, bmode,
7872 mreg, mmode),
7873 bmode),
7874 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7875 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7876 lowpart_subreg (bmode, mreg, mmode),
7877 bmode),
7878 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7882 /* Verify some simplifications involving scalar expressions. */
7884 static void
7885 test_scalar_ops ()
7887 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7889 machine_mode mode = (machine_mode) i;
7890 if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7891 test_scalar_int_ops (mode);
7894 test_scalar_int_ext_ops (HImode, QImode);
7895 test_scalar_int_ext_ops (SImode, QImode);
7896 test_scalar_int_ext_ops (SImode, HImode);
7897 test_scalar_int_ext_ops (DImode, QImode);
7898 test_scalar_int_ext_ops (DImode, HImode);
7899 test_scalar_int_ext_ops (DImode, SImode);
7901 test_scalar_int_ext_ops2 (SImode, HImode, QImode);
7902 test_scalar_int_ext_ops2 (DImode, HImode, QImode);
7903 test_scalar_int_ext_ops2 (DImode, SImode, QImode);
7904 test_scalar_int_ext_ops2 (DImode, SImode, HImode);
7907 /* Test vector simplifications involving VEC_DUPLICATE in which the
7908 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7909 register that holds one element of MODE. */
7911 static void
7912 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7914 scalar_mode inner_mode = GET_MODE_INNER (mode);
7915 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7916 poly_uint64 nunits = GET_MODE_NUNITS (mode);
7917 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7919 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7920 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7921 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7922 ASSERT_RTX_EQ (duplicate,
7923 simplify_unary_operation (NOT, mode,
7924 duplicate_not, mode));
7926 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7927 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7928 ASSERT_RTX_EQ (duplicate,
7929 simplify_unary_operation (NEG, mode,
7930 duplicate_neg, mode));
7932 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7933 ASSERT_RTX_EQ (duplicate,
7934 simplify_binary_operation (PLUS, mode, duplicate,
7935 CONST0_RTX (mode)));
7937 ASSERT_RTX_EQ (duplicate,
7938 simplify_binary_operation (MINUS, mode, duplicate,
7939 CONST0_RTX (mode)));
7941 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7942 simplify_binary_operation (MINUS, mode, duplicate,
7943 duplicate));
7946 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7947 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7948 ASSERT_RTX_PTR_EQ (scalar_reg,
7949 simplify_binary_operation (VEC_SELECT, inner_mode,
7950 duplicate, zero_par));
7952 unsigned HOST_WIDE_INT const_nunits;
7953 if (nunits.is_constant (&const_nunits))
7955 /* And again with the final element. */
7956 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7957 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7958 ASSERT_RTX_PTR_EQ (scalar_reg,
7959 simplify_binary_operation (VEC_SELECT, inner_mode,
7960 duplicate, last_par));
7962 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7963 rtx vector_reg = make_test_reg (mode);
7964 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7966 if (i >= HOST_BITS_PER_WIDE_INT)
7967 break;
7968 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7969 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7970 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7971 ASSERT_RTX_EQ (scalar_reg,
7972 simplify_gen_subreg (inner_mode, vm,
7973 mode, offset));
7977 /* Test a scalar subreg of a VEC_DUPLICATE. */
7978 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
7979 ASSERT_RTX_EQ (scalar_reg,
7980 simplify_gen_subreg (inner_mode, duplicate,
7981 mode, offset));
7983 machine_mode narrower_mode;
7984 if (maybe_ne (nunits, 2U)
7985 && multiple_p (nunits, 2)
7986 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
7987 && VECTOR_MODE_P (narrower_mode))
7989 /* Test VEC_DUPLICATE of a vector. */
7990 rtx_vector_builder nbuilder (narrower_mode, 2, 1);
7991 nbuilder.quick_push (const0_rtx);
7992 nbuilder.quick_push (const1_rtx);
7993 rtx_vector_builder builder (mode, 2, 1);
7994 builder.quick_push (const0_rtx);
7995 builder.quick_push (const1_rtx);
7996 ASSERT_RTX_EQ (builder.build (),
7997 simplify_unary_operation (VEC_DUPLICATE, mode,
7998 nbuilder.build (),
7999 narrower_mode));
8001 /* Test VEC_SELECT of a vector. */
8002 rtx vec_par
8003 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
8004 rtx narrower_duplicate
8005 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
8006 ASSERT_RTX_EQ (narrower_duplicate,
8007 simplify_binary_operation (VEC_SELECT, narrower_mode,
8008 duplicate, vec_par));
8010 /* Test a vector subreg of a VEC_DUPLICATE. */
8011 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
8012 ASSERT_RTX_EQ (narrower_duplicate,
8013 simplify_gen_subreg (narrower_mode, duplicate,
8014 mode, offset));
8018 /* Test vector simplifications involving VEC_SERIES in which the
8019 operands and result have vector mode MODE. SCALAR_REG is a pseudo
8020 register that holds one element of MODE. */
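/* A VEC_SERIES (BASE, STEP) denotes the vector { BASE, BASE + STEP,
   BASE + 2 * STEP, ... }, so, for example, negating a series with base 0
   and step -R gives the series with base 0 and step R, which is what the
   first assertion below checks (illustrative note).  */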
8022 static void
8023 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
8025 /* Test unary cases with VEC_SERIES arguments. */
8026 scalar_mode inner_mode = GET_MODE_INNER (mode);
8027 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
8028 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
8029 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
8030 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
8031 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
8032 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
8033 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
8034 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
8035 neg_scalar_reg);
8036 ASSERT_RTX_EQ (series_0_r,
8037 simplify_unary_operation (NEG, mode, series_0_nr, mode));
8038 ASSERT_RTX_EQ (series_r_m1,
8039 simplify_unary_operation (NEG, mode, series_nr_1, mode));
8040 ASSERT_RTX_EQ (series_r_r,
8041 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
8043 /* Test that a VEC_SERIES with a zero step is simplified away. */
8044 ASSERT_RTX_EQ (duplicate,
8045 simplify_binary_operation (VEC_SERIES, mode,
8046 scalar_reg, const0_rtx));
8048 /* Test PLUS and MINUS with VEC_SERIES. */
8049 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
8050 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
8051 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
8052 ASSERT_RTX_EQ (series_r_r,
8053 simplify_binary_operation (PLUS, mode, series_0_r,
8054 duplicate));
8055 ASSERT_RTX_EQ (series_r_1,
8056 simplify_binary_operation (PLUS, mode, duplicate,
8057 series_0_1));
8058 ASSERT_RTX_EQ (series_r_m1,
8059 simplify_binary_operation (PLUS, mode, duplicate,
8060 series_0_m1));
8061 ASSERT_RTX_EQ (series_0_r,
8062 simplify_binary_operation (MINUS, mode, series_r_r,
8063 duplicate));
8064 ASSERT_RTX_EQ (series_r_m1,
8065 simplify_binary_operation (MINUS, mode, duplicate,
8066 series_0_1));
8067 ASSERT_RTX_EQ (series_r_1,
8068 simplify_binary_operation (MINUS, mode, duplicate,
8069 series_0_m1));
8070 ASSERT_RTX_EQ (series_0_m1,
8071 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
8072 constm1_rtx));
8074 /* Test NEG on constant vector series. */
8075 ASSERT_RTX_EQ (series_0_m1,
8076 simplify_unary_operation (NEG, mode, series_0_1, mode));
8077 ASSERT_RTX_EQ (series_0_1,
8078 simplify_unary_operation (NEG, mode, series_0_m1, mode));
8080 /* Test PLUS and MINUS on constant vector series. */
8081 rtx scalar2 = gen_int_mode (2, inner_mode);
8082 rtx scalar3 = gen_int_mode (3, inner_mode);
8083 rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
8084 rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
8085 rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
8086 ASSERT_RTX_EQ (series_1_1,
8087 simplify_binary_operation (PLUS, mode, series_0_1,
8088 CONST1_RTX (mode)));
8089 ASSERT_RTX_EQ (series_0_m1,
8090 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
8091 series_0_m1));
8092 ASSERT_RTX_EQ (series_1_3,
8093 simplify_binary_operation (PLUS, mode, series_1_1,
8094 series_0_2));
8095 ASSERT_RTX_EQ (series_0_1,
8096 simplify_binary_operation (MINUS, mode, series_1_1,
8097 CONST1_RTX (mode)));
8098 ASSERT_RTX_EQ (series_1_1,
8099 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
8100 series_0_m1));
8101 ASSERT_RTX_EQ (series_1_1,
8102 simplify_binary_operation (MINUS, mode, series_1_3,
8103 series_0_2));
8105 /* Test MULT between constant vectors. */
8106 rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
8107 rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
8108 rtx scalar9 = gen_int_mode (9, inner_mode);
8109 rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
8110 ASSERT_RTX_EQ (series_0_2,
8111 simplify_binary_operation (MULT, mode, series_0_1, vec2));
8112 ASSERT_RTX_EQ (series_3_9,
8113 simplify_binary_operation (MULT, mode, vec3, series_1_3));
8114 if (!GET_MODE_NUNITS (mode).is_constant ())
8115 ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
8116 series_0_1));
8118 /* Test ASHIFT between constant vectors. */
8119 ASSERT_RTX_EQ (series_0_2,
8120 simplify_binary_operation (ASHIFT, mode, series_0_1,
8121 CONST1_RTX (mode)));
8122 if (!GET_MODE_NUNITS (mode).is_constant ())
8123 ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
8124 series_0_1));
8127 static rtx
8128 simplify_merge_mask (rtx x, rtx mask, int op)
8130 return simplify_context ().simplify_merge_mask (x, mask, op);
8133 /* Verify simplify_merge_mask works correctly. */
8135 static void
8136 test_vec_merge (machine_mode mode)
8138 rtx op0 = make_test_reg (mode);
8139 rtx op1 = make_test_reg (mode);
8140 rtx op2 = make_test_reg (mode);
8141 rtx op3 = make_test_reg (mode);
8142 rtx op4 = make_test_reg (mode);
8143 rtx op5 = make_test_reg (mode);
8144 rtx mask1 = make_test_reg (SImode);
8145 rtx mask2 = make_test_reg (SImode);
8146 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
8147 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
8148 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
8150 /* Simple vec_merge. */
8151 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
8152 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
8153 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
8154 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
8156 /* Nested vec_merge.
8157 It's tempting to make this simplify right down to opN, but we don't
8158 because all the simplify_* functions assume that the operands have
8159 already been simplified. */
8160 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
8161 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
8162 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
8164 /* Intermediate unary op. */
8165 rtx unop = gen_rtx_NOT (mode, vm1);
8166 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
8167 simplify_merge_mask (unop, mask1, 0));
8168 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
8169 simplify_merge_mask (unop, mask1, 1));
8171 /* Intermediate binary op. */
8172 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
8173 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
8174 simplify_merge_mask (binop, mask1, 0));
8175 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
8176 simplify_merge_mask (binop, mask1, 1));
8178 /* Intermediate ternary op. */
8179 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
8180 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
8181 simplify_merge_mask (tenop, mask1, 0));
8182 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
8183 simplify_merge_mask (tenop, mask1, 1));
8185 /* Side effects. */
8186 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
8187 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
8188 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
8189 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
8191 /* Called indirectly. */
8192 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
8193 simplify_rtx (nvm));
8196 /* Test subregs of integer vector constant X, trying elements in
8197 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
8198 where NELTS is the number of elements in X. Subregs involving
8199 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
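/* For example, if X is a V4SI constant { 8, 7, 6, 5 }, the V2SI subreg at
   byte 8 is expected to be { 6, 5 }, while the QImode subregs below read
   individual bytes of the elements (illustrative only).  */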
8201 static void
8202 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
8203 unsigned int first_valid = 0)
8205 machine_mode inner_mode = GET_MODE (x);
8206 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8208 for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
8210 machine_mode outer_mode = (machine_mode) modei;
8211 if (!VECTOR_MODE_P (outer_mode))
8212 continue;
8214 unsigned int outer_nunits;
8215 if (GET_MODE_INNER (outer_mode) == int_mode
8216 && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
8217 && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
8219 /* Test subregs in which the outer mode is a smaller,
8220 constant-sized vector of the same element type. */
8221 unsigned int limit
8222 = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
8223 for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
8225 rtx expected = NULL_RTX;
8226 if (elt >= first_valid)
8228 rtx_vector_builder builder (outer_mode, outer_nunits, 1);
8229 for (unsigned int i = 0; i < outer_nunits; ++i)
8230 builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
8231 expected = builder.build ();
8233 poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
8234 ASSERT_RTX_EQ (expected,
8235 simplify_subreg (outer_mode, x,
8236 inner_mode, byte));
8239 else if (known_eq (GET_MODE_SIZE (outer_mode),
8240 GET_MODE_SIZE (inner_mode))
8241 && known_eq (elt_bias, 0U)
8242 && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
8243 || known_eq (GET_MODE_BITSIZE (outer_mode),
8244 GET_MODE_NUNITS (outer_mode)))
8245 && (!FLOAT_MODE_P (outer_mode)
8246 || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
8247 == GET_MODE_UNIT_PRECISION (outer_mode)))
8248 && (GET_MODE_SIZE (inner_mode).is_constant ()
8249 || !CONST_VECTOR_STEPPED_P (x)))
8251 /* Try converting to OUTER_MODE and back. */
8252 rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
8253 ASSERT_TRUE (outer_x != NULL_RTX);
8254 ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
8255 outer_mode, 0));
8259 if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
8261 /* Test each byte in the element range. */
8262 unsigned int limit
8263 = constant_lower_bound (GET_MODE_SIZE (inner_mode));
8264 for (unsigned int i = 0; i < limit; ++i)
8266 unsigned int elt = i / GET_MODE_SIZE (int_mode);
8267 rtx expected = NULL_RTX;
8268 if (elt >= first_valid)
8270 unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
8271 if (BYTES_BIG_ENDIAN)
8272 byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
8273 rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
8274 wide_int shifted_elt
8275 = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
8276 expected = immed_wide_int_const (shifted_elt, QImode);
8278 poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
8279 ASSERT_RTX_EQ (expected,
8280 simplify_subreg (QImode, x, inner_mode, byte));
8285 /* Test constant subregs of integer vector mode INNER_MODE, using 1
8286 element per pattern. */
8288 static void
8289 test_vector_subregs_repeating (machine_mode inner_mode)
8291 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8292 unsigned int min_nunits = constant_lower_bound (nunits);
8293 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8294 unsigned int count = gcd (min_nunits, 8);
8296 rtx_vector_builder builder (inner_mode, count, 1);
8297 for (unsigned int i = 0; i < count; ++i)
8298 builder.quick_push (gen_int_mode (8 - i, int_mode));
8299 rtx x = builder.build ();
8301 test_vector_subregs_modes (x);
8302 if (!nunits.is_constant ())
8303 test_vector_subregs_modes (x, nunits - min_nunits);
8306 /* Test constant subregs of integer vector mode INNER_MODE, using 2
8307 elements per pattern. */
8309 static void
8310 test_vector_subregs_fore_back (machine_mode inner_mode)
8312 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8313 unsigned int min_nunits = constant_lower_bound (nunits);
8314 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8315 unsigned int count = gcd (min_nunits, 4);
8317 rtx_vector_builder builder (inner_mode, count, 2);
8318 for (unsigned int i = 0; i < count; ++i)
8319 builder.quick_push (gen_int_mode (i, int_mode));
8320 for (unsigned int i = 0; i < count; ++i)
8321 builder.quick_push (gen_int_mode (-(int) i, int_mode));
8322 rtx x = builder.build ();
8324 test_vector_subregs_modes (x);
8325 if (!nunits.is_constant ())
8326 test_vector_subregs_modes (x, nunits - min_nunits, count);
8329 /* Test constant subregs of integer vector mode INNER_MODE, using 3
8330 elements per pattern. */
8332 static void
8333 test_vector_subregs_stepped (machine_mode inner_mode)
8335 /* Build { 0, 1, 2, 3, ... }. */
8336 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8337 rtx_vector_builder builder (inner_mode, 1, 3);
8338 for (unsigned int i = 0; i < 3; ++i)
8339 builder.quick_push (gen_int_mode (i, int_mode));
8340 rtx x = builder.build ();
8342 test_vector_subregs_modes (x);
8345 /* Test constant subregs of integer vector mode INNER_MODE. */
8347 static void
8348 test_vector_subregs (machine_mode inner_mode)
8350 test_vector_subregs_repeating (inner_mode);
8351 test_vector_subregs_fore_back (inner_mode);
8352 test_vector_subregs_stepped (inner_mode);
8355 /* Verify some simplifications involving vectors. */
8357 static void
8358 test_vector_ops ()
8360 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
8362 machine_mode mode = (machine_mode) i;
8363 if (VECTOR_MODE_P (mode))
8365 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
8366 test_vector_ops_duplicate (mode, scalar_reg);
8367 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
8368 && maybe_gt (GET_MODE_NUNITS (mode), 2))
8370 test_vector_ops_series (mode, scalar_reg);
8371 test_vector_subregs (mode);
8373 test_vec_merge (mode);
8378 template<unsigned int N>
8379 struct simplify_const_poly_int_tests
8381 static void run ();
8384 template<>
8385 struct simplify_const_poly_int_tests<1>
8387 static void run () {}
8390 /* Test various CONST_POLY_INT properties. */
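/* In the values below, poly_int64 (A, B) stands for A + B * X, where X is
   the runtime vector-length indeterminate; e.g. negating poly_int64 (30, 24)
   gives poly_int64 (-30, -24) and NOT gives poly_int64 (-31, -24), matching
   the x8/x9/x10 assertions (illustrative note).  */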
8392 template<unsigned int N>
8393 void
8394 simplify_const_poly_int_tests<N>::run ()
8396 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
8397 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
8398 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
8399 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
8400 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
8401 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
8402 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
8403 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
8404 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
8405 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
8406 rtx two = GEN_INT (2);
8407 rtx six = GEN_INT (6);
8408 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
8410 /* These tests only try limited operation combinations. Fuller arithmetic
8411 testing is done directly on poly_ints. */
8412 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
8413 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
8414 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
8415 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
8416 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
8417 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
8418 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
8419 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
8420 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
8421 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
8422 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
8425 /* Run all of the selftests within this file. */
8427 void
8428 simplify_rtx_c_tests ()
8430 test_scalar_ops ();
8431 test_vector_ops ();
8432 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
8435 } // namespace selftest
8437 #endif /* CHECKING_P */