gcc/simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
#include "rtlanal.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
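/* Illustrative sketch (editor's example, not in the upstream source):
   on a host where HOST_WIDE_INT is a signed 64-bit type,

     HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63)  expands to HOST_WIDE_INT_M1
     HWI_SIGN_EXTEND (42)                      expands to HOST_WIDE_INT_0

   i.e. the high half becomes all ones exactly when the low half would
   be negative if read as a signed wide int.  */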
static bool plus_minus_operand_p (const_rtx);

/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
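/* Illustrative sketch (editor's example, not in the upstream source):
   for QImode, whose precision is 8, only the value 0x80 consists of
   just the sign bit, so

     mode_signbit_p (QImode, GEN_INT (0x80))   returns true
     mode_signbit_p (QImode, GEN_INT (0x7f))   returns false

   after the constant has been masked down to the QImode precision.  */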
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
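/* Illustrative sketch (editor's example, not in the upstream source):
   with SImode, whose precision is 32,

     val_signbit_p (SImode, 0x80000000)             -> true
     val_signbit_known_set_p (SImode, 0xdeadbeef)   -> true  (bit 31 set)
     val_signbit_known_clear_p (SImode, 0x7fffffff) -> true  (bit 31 clear)

   and all three return false for a mode whose precision exceeds
   HOST_BITS_PER_WIDE_INT, such as TImode on a 64-bit host.  */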
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
				       rtx op0, rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
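/* Usage sketch (editor's example, not in the upstream source): a caller
   holding a simplify_context, or going through the global
   simplify_gen_binary wrapper, might build

     rtx x = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   Adding zero folds away, so X is just REG; for an expression that does
   not fold, the commutative canonicalization above runs first and the
   PLUS rtx is generated with the constant as the second operand.  */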
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
				      machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
					machine_mode op0_mode,
					rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
					   machine_mode cmp_mode,
					   rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
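/* Usage sketch (editor's example, not in the upstream source): given
   X = (plus:SI (reg:SI 100) (const_int 4)), replacing the register with
   a known constant folds the whole expression rather than merely
   substituting the operand:

     simplify_replace_rtx (x, reg, GEN_INT (8))   ==> (const_int 12)

   because each rebuilt subexpression is routed through the
   simplify_gen_* routines above.  */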
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
				       machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* Simplifications of (truncate:A (subreg:B X 0)).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && subreg_lowpart_p (op))
    {
      /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
      if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
	{
	  rtx inner = XEXP (SUBREG_REG (op), 0);
	  if (GET_MODE_PRECISION (int_mode)
	      <= GET_MODE_PRECISION (subreg_mode))
	    return simplify_gen_unary (TRUNCATE, int_mode, inner,
				       GET_MODE (inner));
	  else
	    /* If subreg above is paradoxical and C is narrower
	       than A, return (subreg:A (truncate:C X) 0).  */
	    return simplify_gen_subreg (int_mode, SUBREG_REG (op),
					subreg_mode, 0);
	}

      /* Simplifications of (truncate:A (subreg:B X:C 0)) with
	 paradoxical subregs (B is wider than C).  */
      if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
	{
	  unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
	  unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
	  if (int_op_prec > subreg_prec)
	    {
	      if (int_mode == subreg_mode)
		return SUBREG_REG (op);
	      if (GET_MODE_PRECISION (int_mode) < subreg_prec)
		return simplify_gen_unary (TRUNCATE, int_mode,
					   SUBREG_REG (op), subreg_mode);
	    }
	  /* Simplification of (truncate:A (subreg:B X:C 0)) where
	     A is narrower than B and B is narrower than C.  */
	  else if (int_op_prec < subreg_prec
		   && GET_MODE_PRECISION (int_mode) < int_op_prec)
	    return simplify_gen_unary (TRUNCATE, int_mode,
				       SUBREG_REG (op), subreg_mode);
	}
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
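/* Illustrative sketch (editor's example, not in the upstream source) of
   the rvalue-truncation rules documented above: when the machine can
   operate in the narrower mode,

     (truncate:QI (plus:SI (reg:SI 100) (reg:SI 101)))

   can be rewritten by simplify_truncation as

     (plus:QI (truncate:QI (reg:SI 100)) (truncate:QI (reg:SI 101)))

   and each inner TRUNCATE may in turn become a lowpart subreg when
   TRULY_NOOP_TRUNCATION_MODES_P allows, as the comment before
   simplify_truncation explains.  */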
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
					    rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants can reach here with -frounding-math, if they do then
     the conversion isn't exact.  */
  if (op0_mode == VOIDmode)
    return false;
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
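/* Illustrative sketch (editor's example, not in the upstream source):
   SFmode keeps 24 significand bits, so (float:SF (reg:HI X)) is always
   exact (16 input bits fit), while (float:SF (reg:SI X)) is only known
   to be exact when nonzero_bits or num_sign_bit_copies prove that at
   most 24 significant bits survive, e.g. when X is
   (and:SI (reg:SI) (const_int 65535)).  */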
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
					      rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	      || (GET_CODE (false_rtx) == NEG
		  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
	return op;
      break;
    case FLOAT_TRUNCATE:
      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
	return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	case PARITY:
	  /* (parity (parity x)) -> parity (x).  */
	  return op;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op))
	{
	  rtx subreg = SUBREG_REG (op);
	  machine_mode subreg_mode = GET_MODE (subreg);
	  if (!paradoxical_subreg_p (mode, subreg_mode))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
	      if (temp)
		{
		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
		  if (partial_subreg_p (temp))
		    {
		      SUBREG_PROMOTED_VAR_P (temp) = 1;
		      SUBREG_PROMOTED_SET (temp, 1);
		    }
		  return temp;
		}
	    }
	  else
	    /* Sign-extending a sign-extended subreg.  */
	    return simplify_gen_unary (SIGN_EXTEND, mode,
				       subreg, subreg_mode);
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_PRECISION (int_mode)
		      > GET_MODE_PRECISION (op_mode));
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
	 wider than O.  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
	{
	  scalar_int_mode m_mode, n_mode, o_mode;
	  rtx old_shift = XEXP (op, 0);
	  if (is_a <scalar_int_mode> (mode, &m_mode)
	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
		 == INTVAL (XEXP (old_shift, 1)))
	    {
	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
						   GET_MODE (old_shift),
						   XEXP (old_shift, 0),
						   XEXP (old_shift, 1));
	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
					   GET_MODE (new_shift));
	      if (mode != GET_MODE (new_shift))
		return simplify_gen_unary (TRUNCATE, mode, new_shift,
					   GET_MODE (new_shift));
	      return new_shift;
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;
    case ZERO_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op))
	{
	  rtx subreg = SUBREG_REG (op);
	  machine_mode subreg_mode = GET_MODE (subreg);
	  if (!paradoxical_subreg_p (mode, subreg_mode))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
	      if (temp)
		{
		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
		  if (partial_subreg_p (temp))
		    {
		      SUBREG_PROMOTED_VAR_P (temp) = 1;
		      SUBREG_PROMOTED_SET (temp, 0);
		    }
		  return temp;
		}
	    }
	  else
	    /* Zero-extending a zero-extended subreg.  */
	    return simplify_gen_unary (ZERO_EXTEND, mode,
				       subreg, subreg_mode);
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }
1796 if (VECTOR_MODE_P (mode)
1797 && vec_duplicate_p (op, &elt)
1798 && code != VEC_DUPLICATE)
1800 if (code == SIGN_EXTEND || code == ZERO_EXTEND)
1801 /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1802 operations by promoting VEC_DUPLICATE to the root of the expression
1803 (as far as possible). */
1804 temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
1805 elt, GET_MODE_INNER (GET_MODE (op)));
1806 else
1807 /* Try applying the operator to ELT and see if that simplifies.
1808 We can duplicate the result if so.
1810 The reason we traditionally haven't used simplify_gen_unary
1811 for these codes is that it didn't necessarily seem to be a
1812 win to convert things like:
1814 (neg:V (vec_duplicate:V (reg:S R)))
1818 (vec_duplicate:V (neg:S (reg:S R)))
1820 The first might be done entirely in vector registers while the
1821 second might need a move between register files.
1823 However, there are also cases where promoting the vec_duplicate is
1824 more efficient, and there is definite value in having a canonical
1825 form when matching instruction patterns. We should consider
1826 extending the simplify_gen_unary code above to more cases. */
1827 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1828 elt, GET_MODE_INNER (GET_MODE (op)));
1829 if (temp)
1830 return gen_vec_duplicate (mode, temp);
1833 return 0;
1836 /* Try to compute the value of a unary operation CODE whose output mode is to
1837 be MODE with input operand OP whose mode was originally OP_MODE.
1838 Return zero if the value cannot be computed. */
1840 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1841 rtx op, machine_mode op_mode)
1843 scalar_int_mode result_mode;
1845 if (code == VEC_DUPLICATE)
1847 gcc_assert (VECTOR_MODE_P (mode));
1848 if (GET_MODE (op) != VOIDmode)
1850 if (!VECTOR_MODE_P (GET_MODE (op)))
1851 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1852 else
1853 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1854 (GET_MODE (op)));
1856 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1857 return gen_const_vec_duplicate (mode, op);
1858 if (GET_CODE (op) == CONST_VECTOR
1859 && (CONST_VECTOR_DUPLICATE_P (op)
1860 || CONST_VECTOR_NUNITS (op).is_constant ()))
1862 unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1863 ? CONST_VECTOR_NPATTERNS (op)
1864 : CONST_VECTOR_NUNITS (op).to_constant ());
1865 gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1866 rtx_vector_builder builder (mode, npatterns, 1);
1867 for (unsigned i = 0; i < npatterns; i++)
1868 builder.quick_push (CONST_VECTOR_ELT (op, i));
1869 return builder.build ();
1873 if (VECTOR_MODE_P (mode)
1874 && GET_CODE (op) == CONST_VECTOR
1875 && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1877 gcc_assert (GET_MODE (op) == op_mode);
1879 rtx_vector_builder builder;
1880 if (!builder.new_unary_operation (mode, op, false))
1881 return 0;
1883 unsigned int count = builder.encoded_nelts ();
1884 for (unsigned int i = 0; i < count; i++)
1886 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1887 CONST_VECTOR_ELT (op, i),
1888 GET_MODE_INNER (op_mode));
1889 if (!x || !valid_for_const_vector_p (mode, x))
1890 return 0;
1891 builder.quick_push (x);
1893 return builder.build ();
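/* A minimal illustration: (neg:V2SI (const_vector [7, -3])) is folded
   element by element to (const_vector [-7, 3]).  */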
1896 /* The order of these tests is critical so that, for example, we don't
1897 check the wrong mode (input vs. output) for a conversion operation,
1898 such as FIX. At some point, this should be simplified. */
1900 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1902 REAL_VALUE_TYPE d;
1904 if (op_mode == VOIDmode)
1906 /* CONST_INTs have VOIDmode as their mode. We assume that all
1907 the bits of the constant are significant, though this is
1908 a dangerous assumption as many times CONST_INTs are
1909 created and used with garbage in the bits outside of the
1910 precision of the implied mode of the const_int. */
1911 op_mode = MAX_MODE_INT;
1914 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1916 /* Avoid the folding if flag_signaling_nans is on and
1917 operand is a signaling NaN. */
1918 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1919 return 0;
1921 d = real_value_truncate (mode, d);
1923 /* Avoid the folding if flag_rounding_math is on and the
1924 conversion is not exact. */
1925 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1927 bool fail = false;
1928 wide_int w = real_to_integer (&d, &fail,
1929 GET_MODE_PRECISION
1930 (as_a <scalar_int_mode> (op_mode)));
1931 if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1932 return 0;
1935 return const_double_from_real_value (d, mode);
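/* For example, (float:SF (const_int 16777217)) is not exact if SFmode is
   IEEE single precision; under -frounding-math the check above declines
   to fold it, otherwise the rounded SFmode constant is returned.  */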
1937 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1939 REAL_VALUE_TYPE d;
1941 if (op_mode == VOIDmode)
1943 /* CONST_INTs have VOIDmode as their mode. We assume that all
1944 the bits of the constant are significant, though this is
1945 a dangerous assumption as many times CONST_INTs are
1946 created and used with garbage in the bits outside of the
1947 precision of the implied mode of the const_int. */
1948 op_mode = MAX_MODE_INT;
1951 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1953 /* Avoid the folding if flag_signaling_nans is on and
1954 operand is a signaling NaN. */
1955 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1956 return 0;
1958 d = real_value_truncate (mode, d);
1960 /* Avoid the folding if flag_rounding_math is on and the
1961 conversion is not exact. */
1962 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1964 bool fail = false;
1965 wide_int w = real_to_integer (&d, &fail,
1966 GET_MODE_PRECISION
1967 (as_a <scalar_int_mode> (op_mode)));
1968 if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1969 return 0;
1972 return const_double_from_real_value (d, mode);
1975 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1977 unsigned int width = GET_MODE_PRECISION (result_mode);
1978 if (width > MAX_BITSIZE_MODE_ANY_INT)
1979 return 0;
1981 wide_int result;
1982 scalar_int_mode imode = (op_mode == VOIDmode
1983 ? result_mode
1984 : as_a <scalar_int_mode> (op_mode));
1985 rtx_mode_t op0 = rtx_mode_t (op, imode);
1986 int int_value;
1988 #if TARGET_SUPPORTS_WIDE_INT == 0
1989 /* This assert keeps the simplification from producing a result
1990 that cannot be represented in a CONST_DOUBLE but a lot of
1991 upstream callers expect that this function never fails to
1992 simplify something, so if you added this to the test
1993 above, the code would die later anyway. If this assert
1994 happens, you just need to make the port support wide int. */
1995 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1996 #endif
1998 switch (code)
2000 case NOT:
2001 result = wi::bit_not (op0);
2002 break;
2004 case NEG:
2005 result = wi::neg (op0);
2006 break;
2008 case ABS:
2009 result = wi::abs (op0);
2010 break;
2012 case FFS:
2013 result = wi::shwi (wi::ffs (op0), result_mode);
2014 break;
2016 case CLZ:
2017 if (wi::ne_p (op0, 0))
2018 int_value = wi::clz (op0);
2019 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2020 return NULL_RTX;
2021 result = wi::shwi (int_value, result_mode);
2022 break;
2024 case CLRSB:
2025 result = wi::shwi (wi::clrsb (op0), result_mode);
2026 break;
2028 case CTZ:
2029 if (wi::ne_p (op0, 0))
2030 int_value = wi::ctz (op0);
2031 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2032 return NULL_RTX;
2033 result = wi::shwi (int_value, result_mode);
2034 break;
2036 case POPCOUNT:
2037 result = wi::shwi (wi::popcount (op0), result_mode);
2038 break;
2040 case PARITY:
2041 result = wi::shwi (wi::parity (op0), result_mode);
2042 break;
2044 case BSWAP:
2045 result = wide_int (op0).bswap ();
2046 break;
2048 case TRUNCATE:
2049 case ZERO_EXTEND:
2050 result = wide_int::from (op0, width, UNSIGNED);
2051 break;
2053 case SIGN_EXTEND:
2054 result = wide_int::from (op0, width, SIGNED);
2055 break;
2057 case SS_NEG:
2058 if (wi::only_sign_bit_p (op0))
2059 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2060 else
2061 result = wi::neg (op0);
2062 break;
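/* E.g. in 8-bit QImode, (ss_neg:QI (const_int -128)) folds to
   (const_int 127) instead of wrapping back to -128.  */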
2064 case SS_ABS:
2065 if (wi::only_sign_bit_p (op0))
2066 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2067 else
2068 result = wi::abs (op0);
2069 break;
2071 case SQRT:
2072 default:
2073 return 0;
2076 return immed_wide_int_const (result, result_mode);
2079 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2080 && SCALAR_FLOAT_MODE_P (mode)
2081 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
2083 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
2084 switch (code)
2086 case SQRT:
2087 return 0;
2088 case ABS:
2089 d = real_value_abs (&d);
2090 break;
2091 case NEG:
2092 d = real_value_negate (&d);
2093 break;
2094 case FLOAT_TRUNCATE:
2095 /* Don't perform the operation if flag_signaling_nans is on
2096 and the operand is a signaling NaN. */
2097 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2098 return NULL_RTX;
2099 /* Or if flag_rounding_math is on and the truncation is not
2100 exact. */
2101 if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2102 && !exact_real_truncate (mode, &d))
2103 return NULL_RTX;
2104 d = real_value_truncate (mode, d);
2105 break;
2106 case FLOAT_EXTEND:
2107 /* Don't perform the operation if flag_signaling_nans is on
2108 and the operand is a signaling NaN. */
2109 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2110 return NULL_RTX;
2111 /* All this does is change the mode, unless changing
2112 mode class. */
2113 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
2114 real_convert (&d, mode, &d);
2115 break;
2116 case FIX:
2117 /* Don't perform the operation if flag_signaling_nans is on
2118 and the operand is a signaling NaN. */
2119 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2120 return NULL_RTX;
2121 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
2122 break;
2123 case NOT:
2125 long tmp[4];
2126 int i;
2128 real_to_target (tmp, &d, GET_MODE (op));
2129 for (i = 0; i < 4; i++)
2130 tmp[i] = ~tmp[i];
2131 real_from_target (&d, tmp, mode);
2132 break;
2134 default:
2135 gcc_unreachable ();
2137 return const_double_from_real_value (d, mode);
2139 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2140 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2141 && is_int_mode (mode, &result_mode))
2143 unsigned int width = GET_MODE_PRECISION (result_mode);
2144 if (width > MAX_BITSIZE_MODE_ANY_INT)
2145 return 0;
2147 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2148 operators are intentionally left unspecified (to ease implementation
2149 by target backends), for consistency, this routine implements the
2150 same semantics for constant folding as used by the middle-end. */
2152 /* This was formerly used only for non-IEEE float.
2153 eggert@twinsun.com says it is safe for IEEE also. */
2154 REAL_VALUE_TYPE t;
2155 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2156 wide_int wmax, wmin;
2157 /* This is part of the ABI of real_to_integer, but we check
2158 things before making this call. */
2159 bool fail;
2161 switch (code)
2163 case FIX:
2164 if (REAL_VALUE_ISNAN (*x))
2165 return const0_rtx;
2167 /* Test against the signed upper bound. */
2168 wmax = wi::max_value (width, SIGNED);
2169 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2170 if (real_less (&t, x))
2171 return immed_wide_int_const (wmax, mode);
2173 /* Test against the signed lower bound. */
2174 wmin = wi::min_value (width, SIGNED);
2175 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2176 if (real_less (x, &t))
2177 return immed_wide_int_const (wmin, mode);
2179 return immed_wide_int_const (real_to_integer (x, &fail, width),
2180 mode);
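/* E.g. with an 8-bit result mode, (fix:QI X) for the DFmode constant
   300.0 saturates to (const_int 127), and a NaN input folds to
   const0_rtx, as handled above.  */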
2182 case UNSIGNED_FIX:
2183 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2184 return const0_rtx;
2186 /* Test against the unsigned upper bound. */
2187 wmax = wi::max_value (width, UNSIGNED);
2188 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2189 if (real_less (&t, x))
2190 return immed_wide_int_const (wmax, mode);
2192 return immed_wide_int_const (real_to_integer (x, &fail, width),
2193 mode);
2195 default:
2196 gcc_unreachable ();
2200 /* Handle polynomial integers. */
2201 else if (CONST_POLY_INT_P (op))
2203 poly_wide_int result;
2204 switch (code)
2206 case NEG:
2207 result = -const_poly_int_value (op);
2208 break;
2210 case NOT:
2211 result = ~const_poly_int_value (op);
2212 break;
2214 default:
2215 return NULL_RTX;
2217 return immed_wide_int_const (result, mode);
2220 return NULL_RTX;
2223 /* Subroutine of simplify_binary_operation to simplify a binary operation
2224 CODE that can commute with byte swapping, with result mode MODE and
2225 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2226 Return zero if no simplification or canonicalization is possible. */
2229 simplify_context::simplify_byte_swapping_operation (rtx_code code,
2230 machine_mode mode,
2231 rtx op0, rtx op1)
2233 rtx tem;
2235 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2236 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2238 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2239 simplify_gen_unary (BSWAP, mode, op1, mode));
2240 return simplify_gen_unary (BSWAP, mode, tem, mode);
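/* E.g., assuming 32-bit SImode, (and:SI (bswap:SI X) (const_int 0xff00))
   becomes (bswap:SI (and:SI X (const_int 0xff0000))): the mask is
   byte-swapped so the AND can be applied before the BSWAP.  */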
2243 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2244 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2246 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2247 return simplify_gen_unary (BSWAP, mode, tem, mode);
2250 return NULL_RTX;
2253 /* Subroutine of simplify_binary_operation to simplify a commutative,
2254 associative binary operation CODE with result mode MODE, operating
2255 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2256 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2257 canonicalization is possible. */
2260 simplify_context::simplify_associative_operation (rtx_code code,
2261 machine_mode mode,
2262 rtx op0, rtx op1)
2264 rtx tem;
2266 /* Linearize the operator to the left. */
2267 if (GET_CODE (op1) == code)
2269 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2270 if (GET_CODE (op0) == code)
2272 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2273 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2276 /* "a op (b op c)" becomes "(b op c) op a". */
2277 if (! swap_commutative_operands_p (op1, op0))
2278 return simplify_gen_binary (code, mode, op1, op0);
2280 std::swap (op0, op1);
2283 if (GET_CODE (op0) == code)
2285 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2286 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2288 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2289 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2292 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2293 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2294 if (tem != 0)
2295 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2297 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2298 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2299 if (tem != 0)
2300 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2303 return 0;
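/* The two helpers below encode a comparison as the bitwise OR of four
   primitive outcomes: LT = 8, GT = 4, EQ = 2 and UNORDERED = 1.
   Compound codes are the union of their outcomes, e.g. LE = LT|EQ = 10
   and NE = LT|GT|UNORDERED = 13; a mask of 15 covers every outcome.  */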
2306 /* Return a mask describing the COMPARISON. */
2307 static int
2308 comparison_to_mask (enum rtx_code comparison)
2310 switch (comparison)
2312 case LT:
2313 return 8;
2314 case GT:
2315 return 4;
2316 case EQ:
2317 return 2;
2318 case UNORDERED:
2319 return 1;
2321 case LTGT:
2322 return 12;
2323 case LE:
2324 return 10;
2325 case GE:
2326 return 6;
2327 case UNLT:
2328 return 9;
2329 case UNGT:
2330 return 5;
2331 case UNEQ:
2332 return 3;
2334 case ORDERED:
2335 return 14;
2336 case NE:
2337 return 13;
2338 case UNLE:
2339 return 11;
2340 case UNGE:
2341 return 7;
2343 default:
2344 gcc_unreachable ();
2348 /* Return a comparison corresponding to the MASK. */
2349 static enum rtx_code
2350 mask_to_comparison (int mask)
2352 switch (mask)
2354 case 8:
2355 return LT;
2356 case 4:
2357 return GT;
2358 case 2:
2359 return EQ;
2360 case 1:
2361 return UNORDERED;
2363 case 12:
2364 return LTGT;
2365 case 10:
2366 return LE;
2367 case 6:
2368 return GE;
2369 case 9:
2370 return UNLT;
2371 case 5:
2372 return UNGT;
2373 case 3:
2374 return UNEQ;
2376 case 14:
2377 return ORDERED;
2378 case 13:
2379 return NE;
2380 case 11:
2381 return UNLE;
2382 case 7:
2383 return UNGE;
2385 default:
2386 gcc_unreachable ();
2390 /* Return true if CODE is valid for comparisons of mode MODE, false
2391 otherwise.
2393 It is always safe to return false, even if the code was valid for the
2394 given mode as that will merely suppress optimizations. */
2396 static bool
2397 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2399 switch (code)
2401 /* These are valid for integral, floating and vector modes. */
2402 case NE:
2403 case EQ:
2404 case GE:
2405 case GT:
2406 case LE:
2407 case LT:
2408 return (INTEGRAL_MODE_P (mode)
2409 || FLOAT_MODE_P (mode)
2410 || VECTOR_MODE_P (mode));
2412 /* These are valid for floating point modes. */
2413 case LTGT:
2414 case UNORDERED:
2415 case ORDERED:
2416 case UNEQ:
2417 case UNGE:
2418 case UNGT:
2419 case UNLE:
2420 case UNLT:
2421 return FLOAT_MODE_P (mode);
2423 /* These are filtered out in simplify_logical_relational_operation, but
2424 we check for them too as a matter of safety. They are valid
2425 for integral and vector modes. */
2426 case GEU:
2427 case GTU:
2428 case LEU:
2429 case LTU:
2430 return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2432 default:
2433 gcc_unreachable ();
2437 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2438 false/true value of comparison with MODE where comparison operands
2439 have CMP_MODE. */
2441 static rtx
2442 relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2444 if (SCALAR_FLOAT_MODE_P (mode))
2446 if (res == const0_rtx)
2447 return CONST0_RTX (mode);
2448 #ifdef FLOAT_STORE_FLAG_VALUE
2449 REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2450 return const_double_from_real_value (val, mode);
2451 #else
2452 return NULL_RTX;
2453 #endif
2455 if (VECTOR_MODE_P (mode))
2457 if (res == const0_rtx)
2458 return CONST0_RTX (mode);
2459 #ifdef VECTOR_STORE_FLAG_VALUE
2460 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2461 if (val == NULL_RTX)
2462 return NULL_RTX;
2463 if (val == const1_rtx)
2464 return CONST1_RTX (mode);
2466 return gen_const_vec_duplicate (mode, val);
2467 #else
2468 return NULL_RTX;
2469 #endif
2471 /* For a vector comparison with a scalar int result, it is unknown
2472 whether the target means a comparison into an integral bitmask,
2473 a comparison where all elements true makes const_true_rtx the
2474 whole result, or one where any element true makes const_true_rtx
2475 the whole result. For const0_rtx all the cases are the same. */
2476 if (VECTOR_MODE_P (cmp_mode)
2477 && SCALAR_INT_MODE_P (mode)
2478 && res == const_true_rtx)
2479 return NULL_RTX;
2481 return res;
2484 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2485 and OP1, which should be both relational operations. Return 0 if no such
2486 simplification is possible. */
2488 simplify_context::simplify_logical_relational_operation (rtx_code code,
2489 machine_mode mode,
2490 rtx op0, rtx op1)
2492 /* We only handle IOR of two relational operations. */
2493 if (code != IOR)
2494 return 0;
2496 if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2497 return 0;
2499 if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2500 && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2501 return 0;
2503 enum rtx_code code0 = GET_CODE (op0);
2504 enum rtx_code code1 = GET_CODE (op1);
2506 /* We don't handle unsigned comparisons currently. */
2507 if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2508 return 0;
2509 if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2510 return 0;
2512 int mask0 = comparison_to_mask (code0);
2513 int mask1 = comparison_to_mask (code1);
2515 int mask = mask0 | mask1;
2517 if (mask == 15)
2518 return relational_result (mode, GET_MODE (op0), const_true_rtx);
2520 code = mask_to_comparison (mask);
2522 /* Many comparison codes are only valid for certain mode classes. */
2523 if (!comparison_code_valid_for_mode (code, mode))
2524 return 0;
2526 op0 = XEXP (op1, 0);
2527 op1 = XEXP (op1, 1);
2529 return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
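/* A worked example of the above: (ior:SI (lt:SI X Y) (eq:SI X Y)) has
   masks 8 and 2, so the combined mask 10 maps back to LE and the result
   is (le:SI X Y).  A combined mask of 15 means the IOR is always true
   and relational_result supplies the mode's "true" value instead.  */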
2532 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2533 and OP1. Return 0 if no simplification is possible.
2535 Don't use this for relational operations such as EQ or LT.
2536 Use simplify_relational_operation instead. */
2538 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2539 rtx op0, rtx op1)
2541 rtx trueop0, trueop1;
2542 rtx tem;
2544 /* Relational operations don't work here. We must know the mode
2545 of the operands in order to do the comparison correctly.
2546 Assuming a full word can give incorrect results.
2547 Consider comparing 128 with -128 in QImode. */
2548 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2549 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2551 /* Make sure the constant is second. */
2552 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2553 && swap_commutative_operands_p (op0, op1))
2554 std::swap (op0, op1);
2556 trueop0 = avoid_constant_pool_reference (op0);
2557 trueop1 = avoid_constant_pool_reference (op1);
2559 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2560 if (tem)
2561 return tem;
2562 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2564 if (tem)
2565 return tem;
2567 /* If the above steps did not result in a simplification and op0 or op1
2568 were constant pool references, use the referenced constants directly. */
2569 if (trueop0 != op0 || trueop1 != op1)
2570 return simplify_gen_binary (code, mode, trueop0, trueop1);
2572 return NULL_RTX;
2575 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2576 which OP0 and OP1 are both vector series or vector duplicates
2577 (which are really just series with a step of 0). If so, try to
2578 form a new series by applying CODE to the bases and to the steps.
2579 Return null if no simplification is possible.
2581 MODE is the mode of the operation and is known to be a vector
2582 integer mode. */
2585 simplify_context::simplify_binary_operation_series (rtx_code code,
2586 machine_mode mode,
2587 rtx op0, rtx op1)
2589 rtx base0, step0;
2590 if (vec_duplicate_p (op0, &base0))
2591 step0 = const0_rtx;
2592 else if (!vec_series_p (op0, &base0, &step0))
2593 return NULL_RTX;
2595 rtx base1, step1;
2596 if (vec_duplicate_p (op1, &base1))
2597 step1 = const0_rtx;
2598 else if (!vec_series_p (op1, &base1, &step1))
2599 return NULL_RTX;
2601 /* Only create a new series if we can simplify both parts. In other
2602 cases this isn't really a simplification, and it's not necessarily
2603 a win to replace a vector operation with a scalar operation. */
2604 scalar_mode inner_mode = GET_MODE_INNER (mode);
2605 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2606 if (!new_base)
2607 return NULL_RTX;
2609 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2610 if (!new_step)
2611 return NULL_RTX;
2613 return gen_vec_series (mode, new_base, new_step);
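/* A minimal illustration: adding (vec_duplicate:V4SI (const_int 10)) to
   (vec_series:V4SI (const_int 1) (const_int 2)) yields
   (vec_series:V4SI (const_int 11) (const_int 2)), since the duplicate
   contributes a base of 10 and a step of 0.  */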
2616 /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2617 operation CODE with result mode MODE, operating on OP0 and OP1.
2618 e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2619 Returns NULL_RTX if no simplification is possible. */
2622 simplify_context::simplify_distributive_operation (rtx_code code,
2623 machine_mode mode,
2624 rtx op0, rtx op1)
2626 enum rtx_code op = GET_CODE (op0);
2627 gcc_assert (GET_CODE (op1) == op);
2629 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2630 && ! side_effects_p (XEXP (op0, 1)))
2631 return simplify_gen_binary (op, mode,
2632 simplify_gen_binary (code, mode,
2633 XEXP (op0, 0),
2634 XEXP (op1, 0)),
2635 XEXP (op0, 1));
2637 if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2639 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2640 && ! side_effects_p (XEXP (op0, 0)))
2641 return simplify_gen_binary (op, mode,
2642 simplify_gen_binary (code, mode,
2643 XEXP (op0, 1),
2644 XEXP (op1, 1)),
2645 XEXP (op0, 0));
2646 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2647 && ! side_effects_p (XEXP (op0, 0)))
2648 return simplify_gen_binary (op, mode,
2649 simplify_gen_binary (code, mode,
2650 XEXP (op0, 1),
2651 XEXP (op1, 0)),
2652 XEXP (op0, 0));
2653 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2654 && ! side_effects_p (XEXP (op0, 1)))
2655 return simplify_gen_binary (op, mode,
2656 simplify_gen_binary (code, mode,
2657 XEXP (op0, 0),
2658 XEXP (op1, 1)),
2659 XEXP (op0, 1));
2662 return NULL_RTX;
2665 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2666 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2667 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2668 actual constants. */
2671 simplify_context::simplify_binary_operation_1 (rtx_code code,
2672 machine_mode mode,
2673 rtx op0, rtx op1,
2674 rtx trueop0, rtx trueop1)
2676 rtx tem, reversed, opleft, opright, elt0, elt1;
2677 HOST_WIDE_INT val;
2678 scalar_int_mode int_mode, inner_mode;
2679 poly_int64 offset;
2681 /* Even if we can't compute a constant result,
2682 there are some cases worth simplifying. */
2684 switch (code)
2686 case PLUS:
2687 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2688 when x is NaN, infinite, or finite and nonzero. They aren't
2689 when x is -0 and the rounding mode is not towards -infinity,
2690 since (-0) + 0 is then 0. */
2691 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2692 return op0;
2694 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2695 transformations are safe even for IEEE. */
2696 if (GET_CODE (op0) == NEG)
2697 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2698 else if (GET_CODE (op1) == NEG)
2699 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2701 /* (~a) + 1 -> -a */
2702 if (INTEGRAL_MODE_P (mode)
2703 && GET_CODE (op0) == NOT
2704 && trueop1 == const1_rtx)
2705 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2707 /* Handle both-operands-constant cases. We can only add
2708 CONST_INTs to constants since the sum of relocatable symbols
2709 can't be handled by most assemblers. Don't add CONST_INT
2710 to CONST_INT since overflow won't be computed properly if wider
2711 than HOST_BITS_PER_WIDE_INT. */
2713 if ((GET_CODE (op0) == CONST
2714 || GET_CODE (op0) == SYMBOL_REF
2715 || GET_CODE (op0) == LABEL_REF)
2716 && poly_int_rtx_p (op1, &offset))
2717 return plus_constant (mode, op0, offset);
2718 else if ((GET_CODE (op1) == CONST
2719 || GET_CODE (op1) == SYMBOL_REF
2720 || GET_CODE (op1) == LABEL_REF)
2721 && poly_int_rtx_p (op0, &offset))
2722 return plus_constant (mode, op1, offset);
2724 /* See if this is something like X * C - X or vice versa or
2725 if the multiplication is written as a shift. If so, we can
2726 distribute and make a new multiply, shift, or maybe just
2727 have X (if C is 2 in the example above). But don't make
2728 something more expensive than we had before. */
2730 if (is_a <scalar_int_mode> (mode, &int_mode))
2732 rtx lhs = op0, rhs = op1;
2734 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2735 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2737 if (GET_CODE (lhs) == NEG)
2739 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2740 lhs = XEXP (lhs, 0);
2742 else if (GET_CODE (lhs) == MULT
2743 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2745 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2746 lhs = XEXP (lhs, 0);
2748 else if (GET_CODE (lhs) == ASHIFT
2749 && CONST_INT_P (XEXP (lhs, 1))
2750 && INTVAL (XEXP (lhs, 1)) >= 0
2751 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2753 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2754 GET_MODE_PRECISION (int_mode));
2755 lhs = XEXP (lhs, 0);
2758 if (GET_CODE (rhs) == NEG)
2760 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2761 rhs = XEXP (rhs, 0);
2763 else if (GET_CODE (rhs) == MULT
2764 && CONST_INT_P (XEXP (rhs, 1)))
2766 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2767 rhs = XEXP (rhs, 0);
2769 else if (GET_CODE (rhs) == ASHIFT
2770 && CONST_INT_P (XEXP (rhs, 1))
2771 && INTVAL (XEXP (rhs, 1)) >= 0
2772 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2774 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2775 GET_MODE_PRECISION (int_mode));
2776 rhs = XEXP (rhs, 0);
2779 if (rtx_equal_p (lhs, rhs))
2781 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2782 rtx coeff;
2783 bool speed = optimize_function_for_speed_p (cfun);
2785 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2787 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2788 return (set_src_cost (tem, int_mode, speed)
2789 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
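/* E.g. (plus:SI (ashift:SI X (const_int 2)) X) gives coeff0 = 4 and
   coeff1 = 1, so it becomes (mult:SI X (const_int 5)) unless
   set_src_cost considers that more expensive than the original.  */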
2792 /* Optimize (X - 1) * Y + Y to X * Y. */
2793 lhs = op0;
2794 rhs = op1;
2795 if (GET_CODE (op0) == MULT)
2797 if (((GET_CODE (XEXP (op0, 0)) == PLUS
2798 && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
2799 || (GET_CODE (XEXP (op0, 0)) == MINUS
2800 && XEXP (XEXP (op0, 0), 1) == const1_rtx))
2801 && rtx_equal_p (XEXP (op0, 1), op1))
2802 lhs = XEXP (XEXP (op0, 0), 0);
2803 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2804 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
2805 || (GET_CODE (XEXP (op0, 1)) == MINUS
2806 && XEXP (XEXP (op0, 1), 1) == const1_rtx))
2807 && rtx_equal_p (XEXP (op0, 0), op1))
2808 lhs = XEXP (XEXP (op0, 1), 0);
2810 else if (GET_CODE (op1) == MULT)
2812 if (((GET_CODE (XEXP (op1, 0)) == PLUS
2813 && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
2814 || (GET_CODE (XEXP (op1, 0)) == MINUS
2815 && XEXP (XEXP (op1, 0), 1) == const1_rtx))
2816 && rtx_equal_p (XEXP (op1, 1), op0))
2817 rhs = XEXP (XEXP (op1, 0), 0);
2818 else if (((GET_CODE (XEXP (op1, 1)) == PLUS
2819 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
2820 || (GET_CODE (XEXP (op1, 1)) == MINUS
2821 && XEXP (XEXP (op1, 1), 1) == const1_rtx))
2822 && rtx_equal_p (XEXP (op1, 0), op0))
2823 rhs = XEXP (XEXP (op1, 1), 0);
2825 if (lhs != op0 || rhs != op1)
2826 return simplify_gen_binary (MULT, int_mode, lhs, rhs);
2829 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2830 if (CONST_SCALAR_INT_P (op1)
2831 && GET_CODE (op0) == XOR
2832 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2833 && mode_signbit_p (mode, op1))
2834 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2835 simplify_gen_binary (XOR, mode, op1,
2836 XEXP (op0, 1)));
2838 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2839 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2840 && GET_CODE (op0) == MULT
2841 && GET_CODE (XEXP (op0, 0)) == NEG)
2843 rtx in1, in2;
2845 in1 = XEXP (XEXP (op0, 0), 0);
2846 in2 = XEXP (op0, 1);
2847 return simplify_gen_binary (MINUS, mode, op1,
2848 simplify_gen_binary (MULT, mode,
2849 in1, in2));
2852 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2853 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2854 is 1. */
2855 if (COMPARISON_P (op0)
2856 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2857 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2858 && (reversed = reversed_comparison (op0, mode)))
2859 return
2860 simplify_gen_unary (NEG, mode, reversed, mode);
2862 /* If one of the operands is a PLUS or a MINUS, see if we can
2863 simplify this by the associative law.
2864 Don't use the associative law for floating point.
2865 The inaccuracy makes it nonassociative,
2866 and subtle programs can break if operations are associated. */
2868 if (INTEGRAL_MODE_P (mode)
2869 && (plus_minus_operand_p (op0)
2870 || plus_minus_operand_p (op1))
2871 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2872 return tem;
2874 /* Reassociate floating point addition only when the user
2875 specifies associative math operations. */
2876 if (FLOAT_MODE_P (mode)
2877 && flag_associative_math)
2879 tem = simplify_associative_operation (code, mode, op0, op1);
2880 if (tem)
2881 return tem;
2884 /* Handle vector series. */
2885 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2887 tem = simplify_binary_operation_series (code, mode, op0, op1);
2888 if (tem)
2889 return tem;
2891 break;
2893 case COMPARE:
2894 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2895 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2896 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2897 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2899 rtx xop00 = XEXP (op0, 0);
2900 rtx xop10 = XEXP (op1, 0);
2902 if (REG_P (xop00) && REG_P (xop10)
2903 && REGNO (xop00) == REGNO (xop10)
2904 && GET_MODE (xop00) == mode
2905 && GET_MODE (xop10) == mode
2906 && GET_MODE_CLASS (mode) == MODE_CC)
2907 return xop00;
2909 break;
2911 case MINUS:
2912 /* We can't assume x-x is 0 even with non-IEEE floating point,
2913 but since it is zero except in very strange circumstances, we
2914 will treat it as zero with -ffinite-math-only. */
2915 if (rtx_equal_p (trueop0, trueop1)
2916 && ! side_effects_p (op0)
2917 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2918 return CONST0_RTX (mode);
2920 /* Change subtraction from zero into negation. (0 - x) is the
2921 same as -x when x is NaN, infinite, or finite and nonzero.
2922 But if the mode has signed zeros, and does not round towards
2923 -infinity, then 0 - 0 is 0, not -0. */
2924 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2925 return simplify_gen_unary (NEG, mode, op1, mode);
2927 /* (-1 - a) is ~a, unless the expression contains symbolic
2928 constants, in which case not retaining additions and
2929 subtractions could cause invalid assembly to be produced. */
2930 if (trueop0 == constm1_rtx
2931 && !contains_symbolic_reference_p (op1))
2932 return simplify_gen_unary (NOT, mode, op1, mode);
2934 /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2935 or has signed zeros and supports rounding towards -infinity.
2936 In such a case, 0 - 0 is -0. */
2937 if (!(HONOR_SIGNED_ZEROS (mode)
2938 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2939 && !HONOR_SNANS (mode)
2940 && trueop1 == CONST0_RTX (mode))
2941 return op0;
2943 /* See if this is something like X * C - X or vice versa or
2944 if the multiplication is written as a shift. If so, we can
2945 distribute and make a new multiply, shift, or maybe just
2946 have X (if C is 2 in the example above). But don't make
2947 something more expensive than we had before. */
2949 if (is_a <scalar_int_mode> (mode, &int_mode))
2951 rtx lhs = op0, rhs = op1;
2953 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2954 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2956 if (GET_CODE (lhs) == NEG)
2958 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2959 lhs = XEXP (lhs, 0);
2961 else if (GET_CODE (lhs) == MULT
2962 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2964 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2965 lhs = XEXP (lhs, 0);
2967 else if (GET_CODE (lhs) == ASHIFT
2968 && CONST_INT_P (XEXP (lhs, 1))
2969 && INTVAL (XEXP (lhs, 1)) >= 0
2970 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2972 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2973 GET_MODE_PRECISION (int_mode));
2974 lhs = XEXP (lhs, 0);
2977 if (GET_CODE (rhs) == NEG)
2979 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2980 rhs = XEXP (rhs, 0);
2982 else if (GET_CODE (rhs) == MULT
2983 && CONST_INT_P (XEXP (rhs, 1)))
2985 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2986 rhs = XEXP (rhs, 0);
2988 else if (GET_CODE (rhs) == ASHIFT
2989 && CONST_INT_P (XEXP (rhs, 1))
2990 && INTVAL (XEXP (rhs, 1)) >= 0
2991 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2993 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2994 GET_MODE_PRECISION (int_mode));
2995 negcoeff1 = -negcoeff1;
2996 rhs = XEXP (rhs, 0);
2999 if (rtx_equal_p (lhs, rhs))
3001 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
3002 rtx coeff;
3003 bool speed = optimize_function_for_speed_p (cfun);
3005 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
3007 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
3008 return (set_src_cost (tem, int_mode, speed)
3009 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
3012 /* Optimize (X + 1) * Y - Y to X * Y. */
3013 lhs = op0;
3014 if (GET_CODE (op0) == MULT)
3016 if (((GET_CODE (XEXP (op0, 0)) == PLUS
3017 && XEXP (XEXP (op0, 0), 1) == const1_rtx)
3018 || (GET_CODE (XEXP (op0, 0)) == MINUS
3019 && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
3020 && rtx_equal_p (XEXP (op0, 1), op1))
3021 lhs = XEXP (XEXP (op0, 0), 0);
3022 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
3023 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
3024 || (GET_CODE (XEXP (op0, 1)) == MINUS
3025 && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
3026 && rtx_equal_p (XEXP (op0, 0), op1))
3027 lhs = XEXP (XEXP (op0, 1), 0);
3029 if (lhs != op0)
3030 return simplify_gen_binary (MULT, int_mode, lhs, op1);
3033 /* (a - (-b)) -> (a + b). True even for IEEE. */
3034 if (GET_CODE (op1) == NEG)
3035 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3037 /* (-x - c) may be simplified as (-c - x). */
3038 if (GET_CODE (op0) == NEG
3039 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
3041 tem = simplify_unary_operation (NEG, mode, op1, mode);
3042 if (tem)
3043 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
3046 if ((GET_CODE (op0) == CONST
3047 || GET_CODE (op0) == SYMBOL_REF
3048 || GET_CODE (op0) == LABEL_REF)
3049 && poly_int_rtx_p (op1, &offset))
3050 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
3052 /* Don't let a relocatable value get a negative coeff. */
3053 if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
3054 return simplify_gen_binary (PLUS, mode,
3055 op0,
3056 neg_poly_int_rtx (mode, op1));
3058 /* (x - (x & y)) -> (x & ~y) */
3059 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
3061 if (rtx_equal_p (op0, XEXP (op1, 0)))
3063 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
3064 GET_MODE (XEXP (op1, 1)));
3065 return simplify_gen_binary (AND, mode, op0, tem);
3067 if (rtx_equal_p (op0, XEXP (op1, 1)))
3069 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
3070 GET_MODE (XEXP (op1, 0)));
3071 return simplify_gen_binary (AND, mode, op0, tem);
3075 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3076 by reversing the comparison code if valid. */
3077 if (STORE_FLAG_VALUE == 1
3078 && trueop0 == const1_rtx
3079 && COMPARISON_P (op1)
3080 && (reversed = reversed_comparison (op1, mode)))
3081 return reversed;
3083 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
3084 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3085 && GET_CODE (op1) == MULT
3086 && GET_CODE (XEXP (op1, 0)) == NEG)
3088 rtx in1, in2;
3090 in1 = XEXP (XEXP (op1, 0), 0);
3091 in2 = XEXP (op1, 1);
3092 return simplify_gen_binary (PLUS, mode,
3093 simplify_gen_binary (MULT, mode,
3094 in1, in2),
3095 op0);
3098 /* Canonicalize (minus (neg A) (mult B C)) to
3099 (minus (mult (neg B) C) A). */
3100 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3101 && GET_CODE (op1) == MULT
3102 && GET_CODE (op0) == NEG)
3104 rtx in1, in2;
3106 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
3107 in2 = XEXP (op1, 1);
3108 return simplify_gen_binary (MINUS, mode,
3109 simplify_gen_binary (MULT, mode,
3110 in1, in2),
3111 XEXP (op0, 0));
3114 /* If one of the operands is a PLUS or a MINUS, see if we can
3115 simplify this by the associative law. This will, for example,
3116 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3117 Don't use the associative law for floating point.
3118 The inaccuracy makes it nonassociative,
3119 and subtle programs can break if operations are associated. */
3121 if (INTEGRAL_MODE_P (mode)
3122 && (plus_minus_operand_p (op0)
3123 || plus_minus_operand_p (op1))
3124 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3125 return tem;
3127 /* Handle vector series. */
3128 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3130 tem = simplify_binary_operation_series (code, mode, op0, op1);
3131 if (tem)
3132 return tem;
3134 break;
3136 case MULT:
3137 if (trueop1 == constm1_rtx)
3138 return simplify_gen_unary (NEG, mode, op0, mode);
3140 if (GET_CODE (op0) == NEG)
3142 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3143 /* If op1 is a MULT as well and simplify_unary_operation
3144 just moved the NEG to the second operand, simplify_gen_binary
3145 below could, through simplify_associative_operation, move
3146 the NEG around again and recurse endlessly. */
3147 if (temp
3148 && GET_CODE (op1) == MULT
3149 && GET_CODE (temp) == MULT
3150 && XEXP (op1, 0) == XEXP (temp, 0)
3151 && GET_CODE (XEXP (temp, 1)) == NEG
3152 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3153 temp = NULL_RTX;
3154 if (temp)
3155 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3157 if (GET_CODE (op1) == NEG)
3159 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3160 /* If op0 is a MULT as well and simplify_unary_operation
3161 just moved the NEG to the second operand, simplify_gen_binary
3162 below could, through simplify_associative_operation, move
3163 the NEG around again and recurse endlessly. */
3164 if (temp
3165 && GET_CODE (op0) == MULT
3166 && GET_CODE (temp) == MULT
3167 && XEXP (op0, 0) == XEXP (temp, 0)
3168 && GET_CODE (XEXP (temp, 1)) == NEG
3169 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3170 temp = NULL_RTX;
3171 if (temp)
3172 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3175 /* Maybe simplify x * 0 to 0. The reduction is not valid if
3176 x is NaN, since x * 0 is then also NaN. Nor is it valid
3177 when the mode has signed zeros, since multiplying a negative
3178 number by 0 will give -0, not 0. */
3179 if (!HONOR_NANS (mode)
3180 && !HONOR_SIGNED_ZEROS (mode)
3181 && trueop1 == CONST0_RTX (mode)
3182 && ! side_effects_p (op0))
3183 return op1;
3185 /* In IEEE floating point, x*1 is not equivalent to x for
3186 signalling NaNs. */
3187 if (!HONOR_SNANS (mode)
3188 && trueop1 == CONST1_RTX (mode))
3189 return op0;
3191 /* Convert multiply by constant power of two into shift. */
3192 if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3194 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3195 if (val >= 0)
3196 return simplify_gen_binary (ASHIFT, mode, op0,
3197 gen_int_shift_amount (mode, val));
3200 /* x*2 is x+x and x*(-1) is -x */
3201 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3202 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3203 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3204 && GET_MODE (op0) == mode)
3206 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3208 if (real_equal (d1, &dconst2))
3209 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3211 if (!HONOR_SNANS (mode)
3212 && real_equal (d1, &dconstm1))
3213 return simplify_gen_unary (NEG, mode, op0, mode);
3216 /* Optimize -x * -x as x * x. */
3217 if (FLOAT_MODE_P (mode)
3218 && GET_CODE (op0) == NEG
3219 && GET_CODE (op1) == NEG
3220 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3221 && !side_effects_p (XEXP (op0, 0)))
3222 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3224 /* Likewise, optimize abs(x) * abs(x) as x * x. */
3225 if (SCALAR_FLOAT_MODE_P (mode)
3226 && GET_CODE (op0) == ABS
3227 && GET_CODE (op1) == ABS
3228 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3229 && !side_effects_p (XEXP (op0, 0)))
3230 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3232 /* Reassociate multiplication, but for floating point MULTs
3233 only when the user specifies unsafe math optimizations. */
3234 if (! FLOAT_MODE_P (mode)
3235 || flag_unsafe_math_optimizations)
3237 tem = simplify_associative_operation (code, mode, op0, op1);
3238 if (tem)
3239 return tem;
3241 break;
3243 case IOR:
3244 if (trueop1 == CONST0_RTX (mode))
3245 return op0;
3246 if (INTEGRAL_MODE_P (mode)
3247 && trueop1 == CONSTM1_RTX (mode)
3248 && !side_effects_p (op0))
3249 return op1;
3250 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3251 return op0;
3252 /* A | (~A) -> -1 */
3253 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3254 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3255 && ! side_effects_p (op0)
3256 && SCALAR_INT_MODE_P (mode))
3257 return constm1_rtx;
3259 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3260 if (CONST_INT_P (op1)
3261 && HWI_COMPUTABLE_MODE_P (mode)
3262 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3263 && !side_effects_p (op0))
3264 return op1;
3266 /* Canonicalize (X & C1) | C2. */
3267 if (GET_CODE (op0) == AND
3268 && CONST_INT_P (trueop1)
3269 && CONST_INT_P (XEXP (op0, 1)))
3271 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3272 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3273 HOST_WIDE_INT c2 = INTVAL (trueop1);
3275 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3276 if ((c1 & c2) == c1
3277 && !side_effects_p (XEXP (op0, 0)))
3278 return trueop1;
3280 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3281 if (((c1|c2) & mask) == mask)
3282 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3285 /* Convert (A & B) | A to A. */
3286 if (GET_CODE (op0) == AND
3287 && (rtx_equal_p (XEXP (op0, 0), op1)
3288 || rtx_equal_p (XEXP (op0, 1), op1))
3289 && ! side_effects_p (XEXP (op0, 0))
3290 && ! side_effects_p (XEXP (op0, 1)))
3291 return op1;
3293 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3294 mode size to (rotate A CX). */
3296 if (GET_CODE (op1) == ASHIFT
3297 || GET_CODE (op1) == SUBREG)
3299 opleft = op1;
3300 opright = op0;
3302 else
3304 opright = op1;
3305 opleft = op0;
3308 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3309 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3310 && CONST_INT_P (XEXP (opleft, 1))
3311 && CONST_INT_P (XEXP (opright, 1))
3312 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3313 == GET_MODE_UNIT_PRECISION (mode)))
3314 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
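/* E.g., assuming 32-bit SImode, (ior:SI (ashift:SI X (const_int 24))
   (lshiftrt:SI X (const_int 8))) has shift counts summing to the mode
   precision, so it becomes (rotate:SI X (const_int 24)).  */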
3316 /* Same, but for ashift that has been "simplified" to a wider mode
3317 by simplify_shift_const. */
3319 if (GET_CODE (opleft) == SUBREG
3320 && is_a <scalar_int_mode> (mode, &int_mode)
3321 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3322 &inner_mode)
3323 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3324 && GET_CODE (opright) == LSHIFTRT
3325 && GET_CODE (XEXP (opright, 0)) == SUBREG
3326 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3327 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3328 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3329 SUBREG_REG (XEXP (opright, 0)))
3330 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3331 && CONST_INT_P (XEXP (opright, 1))
3332 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3333 + INTVAL (XEXP (opright, 1))
3334 == GET_MODE_PRECISION (int_mode)))
3335 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3336 XEXP (SUBREG_REG (opleft), 1));
3338 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3339 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
3340 the PLUS does not affect any of the bits in OP1: then we can do
3341 the IOR as a PLUS and we can associate. This is valid if OP1
3342 can be safely shifted left C bits. */
3343 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3344 && GET_CODE (XEXP (op0, 0)) == PLUS
3345 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3346 && CONST_INT_P (XEXP (op0, 1))
3347 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3349 int count = INTVAL (XEXP (op0, 1));
3350 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3352 if (mask >> count == INTVAL (trueop1)
3353 && trunc_int_for_mode (mask, mode) == mask
3354 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3355 return simplify_gen_binary (ASHIFTRT, mode,
3356 plus_constant (mode, XEXP (op0, 0),
3357 mask),
3358 XEXP (op0, 1));
3361 /* The following happens with bitfield merging.
3362 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3363 if (GET_CODE (op0) == AND
3364 && GET_CODE (op1) == AND
3365 && CONST_INT_P (XEXP (op0, 1))
3366 && CONST_INT_P (XEXP (op1, 1))
3367 && (INTVAL (XEXP (op0, 1))
3368 == ~INTVAL (XEXP (op1, 1))))
3370 /* The IOR may be on both sides. */
3371 rtx top0 = NULL_RTX, top1 = NULL_RTX;
3372 if (GET_CODE (XEXP (op1, 0)) == IOR)
3373 top0 = op0, top1 = op1;
3374 else if (GET_CODE (XEXP (op0, 0)) == IOR)
3375 top0 = op1, top1 = op0;
3376 if (top0 && top1)
3378 /* X may be on either side of the inner IOR. */
3379 rtx tem = NULL_RTX;
3380 if (rtx_equal_p (XEXP (top0, 0),
3381 XEXP (XEXP (top1, 0), 0)))
3382 tem = XEXP (XEXP (top1, 0), 1);
3383 else if (rtx_equal_p (XEXP (top0, 0),
3384 XEXP (XEXP (top1, 0), 1)))
3385 tem = XEXP (XEXP (top1, 0), 0);
3386 if (tem)
3387 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3388 simplify_gen_binary
3389 (AND, mode, tem, XEXP (top1, 1)));
3393 /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3394 if (GET_CODE (op0) == GET_CODE (op1)
3395 && (GET_CODE (op0) == AND
3396 || GET_CODE (op0) == IOR
3397 || GET_CODE (op0) == LSHIFTRT
3398 || GET_CODE (op0) == ASHIFTRT
3399 || GET_CODE (op0) == ASHIFT
3400 || GET_CODE (op0) == ROTATE
3401 || GET_CODE (op0) == ROTATERT))
3403 tem = simplify_distributive_operation (code, mode, op0, op1);
3404 if (tem)
3405 return tem;
3408 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3409 if (tem)
3410 return tem;
3412 tem = simplify_associative_operation (code, mode, op0, op1);
3413 if (tem)
3414 return tem;
3416 tem = simplify_logical_relational_operation (code, mode, op0, op1);
3417 if (tem)
3418 return tem;
3419 break;
3421 case XOR:
3422 if (trueop1 == CONST0_RTX (mode))
3423 return op0;
3424 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3425 return simplify_gen_unary (NOT, mode, op0, mode);
3426 if (rtx_equal_p (trueop0, trueop1)
3427 && ! side_effects_p (op0)
3428 && GET_MODE_CLASS (mode) != MODE_CC)
3429 return CONST0_RTX (mode);
3431 /* Canonicalize XOR of the most significant bit to PLUS. */
3432 if (CONST_SCALAR_INT_P (op1)
3433 && mode_signbit_p (mode, op1))
3434 return simplify_gen_binary (PLUS, mode, op0, op1);
3435 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3436 if (CONST_SCALAR_INT_P (op1)
3437 && GET_CODE (op0) == PLUS
3438 && CONST_SCALAR_INT_P (XEXP (op0, 1))
3439 && mode_signbit_p (mode, XEXP (op0, 1)))
3440 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3441 simplify_gen_binary (XOR, mode, op1,
3442 XEXP (op0, 1)));
3444 /* If we are XORing two things that have no bits in common,
3445 convert them into an IOR. This helps to detect rotation encoded
3446 using those methods and possibly other simplifications. */
3448 if (HWI_COMPUTABLE_MODE_P (mode)
3449 && (nonzero_bits (op0, mode)
3450 & nonzero_bits (op1, mode)) == 0)
3451 return (simplify_gen_binary (IOR, mode, op0, op1));
3453 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3454 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3455 (NOT y). */
3457 int num_negated = 0;
3459 if (GET_CODE (op0) == NOT)
3460 num_negated++, op0 = XEXP (op0, 0);
3461 if (GET_CODE (op1) == NOT)
3462 num_negated++, op1 = XEXP (op1, 0);
3464 if (num_negated == 2)
3465 return simplify_gen_binary (XOR, mode, op0, op1);
3466 else if (num_negated == 1)
3467 return simplify_gen_unary (NOT, mode,
3468 simplify_gen_binary (XOR, mode, op0, op1),
3469 mode);
3472 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3473 correspond to a machine insn or result in further simplifications
3474 if B is a constant. */
3476 if (GET_CODE (op0) == AND
3477 && rtx_equal_p (XEXP (op0, 1), op1)
3478 && ! side_effects_p (op1))
3479 return simplify_gen_binary (AND, mode,
3480 simplify_gen_unary (NOT, mode,
3481 XEXP (op0, 0), mode),
3482 op1);
3484 else if (GET_CODE (op0) == AND
3485 && rtx_equal_p (XEXP (op0, 0), op1)
3486 && ! side_effects_p (op1))
3487 return simplify_gen_binary (AND, mode,
3488 simplify_gen_unary (NOT, mode,
3489 XEXP (op0, 1), mode),
3490 op1);
3492 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3493 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3494 out bits inverted twice and not set by C. Similarly, given
3495 (xor (and (xor A B) C) D), simplify without inverting C in
3496 the xor operand: (xor (and A C) (B&C)^D). */
3498 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3499 && GET_CODE (XEXP (op0, 0)) == XOR
3500 && CONST_INT_P (op1)
3501 && CONST_INT_P (XEXP (op0, 1))
3502 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3504 enum rtx_code op = GET_CODE (op0);
3505 rtx a = XEXP (XEXP (op0, 0), 0);
3506 rtx b = XEXP (XEXP (op0, 0), 1);
3507 rtx c = XEXP (op0, 1);
3508 rtx d = op1;
3509 HOST_WIDE_INT bval = INTVAL (b);
3510 HOST_WIDE_INT cval = INTVAL (c);
3511 HOST_WIDE_INT dval = INTVAL (d);
3512 HOST_WIDE_INT xcval;
3514 if (op == IOR)
3515 xcval = ~cval;
3516 else
3517 xcval = cval;
3519 return simplify_gen_binary (XOR, mode,
3520 simplify_gen_binary (op, mode, a, c),
3521 gen_int_mode ((bval & xcval) ^ dval,
3522 mode));
3525 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3526 we can transform like this:
3527 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3528 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3529 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3530 Attempt a few simplifications when B and C are both constants. */
3531 if (GET_CODE (op0) == AND
3532 && CONST_INT_P (op1)
3533 && CONST_INT_P (XEXP (op0, 1)))
3535 rtx a = XEXP (op0, 0);
3536 rtx b = XEXP (op0, 1);
3537 rtx c = op1;
3538 HOST_WIDE_INT bval = INTVAL (b);
3539 HOST_WIDE_INT cval = INTVAL (c);
3541 /* Instead of computing ~A&C, we compute its complement,
3542 (A|~C). If that yields -1, ~A&C is zero, so we can
3543 optimize for sure. If it does not simplify, we still try
3544 to compute ~A&C below, but since that always allocates
3545 RTL, we don't try that before committing to returning a
3546 simplified expression. */
3547 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3548 GEN_INT (~cval));
3550 if ((~cval & bval) == 0)
3552 rtx na_c = NULL_RTX;
3553 if (n_na_c)
3554 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3555 else
3557 /* If ~A does not simplify, don't bother: we don't
3558 want to simplify 2 operations into 3, and if na_c
3559 were to simplify with na, n_na_c would have
3560 simplified as well. */
3561 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3562 if (na)
3563 na_c = simplify_gen_binary (AND, mode, na, c);
3566 /* Try to simplify ~A&C | ~B&C. */
3567 if (na_c != NULL_RTX)
3568 return simplify_gen_binary (IOR, mode, na_c,
3569 gen_int_mode (~bval & cval, mode));
3571 else
3573 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3574 if (n_na_c == CONSTM1_RTX (mode))
3576 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3577 gen_int_mode (~cval & bval,
3578 mode));
3579 return simplify_gen_binary (IOR, mode, a_nc_b,
3580 gen_int_mode (~bval & cval,
3581 mode));
3586 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3587 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3588 machines, and also has shorter instruction path length. */
3589 if (GET_CODE (op0) == AND
3590 && GET_CODE (XEXP (op0, 0)) == XOR
3591 && CONST_INT_P (XEXP (op0, 1))
3592 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3594 rtx a = trueop1;
3595 rtx b = XEXP (XEXP (op0, 0), 1);
3596 rtx c = XEXP (op0, 1);
3597 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3598 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3599 rtx bc = simplify_gen_binary (AND, mode, b, c);
3600 return simplify_gen_binary (IOR, mode, a_nc, bc);
3602 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3603 else if (GET_CODE (op0) == AND
3604 && GET_CODE (XEXP (op0, 0)) == XOR
3605 && CONST_INT_P (XEXP (op0, 1))
3606 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3608 rtx a = XEXP (XEXP (op0, 0), 0);
3609 rtx b = trueop1;
3610 rtx c = XEXP (op0, 1);
3611 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3612 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3613 rtx ac = simplify_gen_binary (AND, mode, a, c);
3614 return simplify_gen_binary (IOR, mode, ac, b_nc);
3617 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3618 comparison if STORE_FLAG_VALUE is 1. */
3619 if (STORE_FLAG_VALUE == 1
3620 && trueop1 == const1_rtx
3621 && COMPARISON_P (op0)
3622 && (reversed = reversed_comparison (op0, mode)))
3623 return reversed;
3625 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3626 is (lt foo (const_int 0)), so we can perform the above
3627 simplification if STORE_FLAG_VALUE is 1. */
3629 if (is_a <scalar_int_mode> (mode, &int_mode)
3630 && STORE_FLAG_VALUE == 1
3631 && trueop1 == const1_rtx
3632 && GET_CODE (op0) == LSHIFTRT
3633 && CONST_INT_P (XEXP (op0, 1))
3634 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3635 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
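/* For example, in SImode (xor (lshiftrt X (const_int 31)) (const_int 1))
   becomes (ge X (const_int 0)), since the shift isolates the sign bit
   and the xor inverts it. */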
3637 /* (xor (comparison foo bar) (const_int sign-bit)) can become the
3638 reversed comparison when STORE_FLAG_VALUE is the sign bit. */
3639 if (is_a <scalar_int_mode> (mode, &int_mode)
3640 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3641 && trueop1 == const_true_rtx
3642 && COMPARISON_P (op0)
3643 && (reversed = reversed_comparison (op0, int_mode)))
3644 return reversed;
3646 /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
3647 if (GET_CODE (op0) == GET_CODE (op1)
3648 && (GET_CODE (op0) == AND
3649 || GET_CODE (op0) == LSHIFTRT
3650 || GET_CODE (op0) == ASHIFTRT
3651 || GET_CODE (op0) == ASHIFT
3652 || GET_CODE (op0) == ROTATE
3653 || GET_CODE (op0) == ROTATERT))
3655 tem = simplify_distributive_operation (code, mode, op0, op1);
3656 if (tem)
3657 return tem;
3660 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3661 if (tem)
3662 return tem;
3664 tem = simplify_associative_operation (code, mode, op0, op1);
3665 if (tem)
3666 return tem;
3667 break;
3669 case AND:
3670 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3671 return trueop1;
3672 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3673 return op0;
3674 if (HWI_COMPUTABLE_MODE_P (mode))
3676 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3677 HOST_WIDE_INT nzop1;
3678 if (CONST_INT_P (trueop1))
3680 HOST_WIDE_INT val1 = INTVAL (trueop1);
3681 /* If we are turning off bits already known off in OP0, we need
3682 not do an AND. */
3683 if ((nzop0 & ~val1) == 0)
3684 return op0;
3686 nzop1 = nonzero_bits (trueop1, mode);
3687 /* If we are clearing all the nonzero bits, the result is zero. */
3688 if ((nzop1 & nzop0) == 0
3689 && !side_effects_p (op0) && !side_effects_p (op1))
3690 return CONST0_RTX (mode);
3692 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3693 && GET_MODE_CLASS (mode) != MODE_CC)
3694 return op0;
3695 /* A & (~A) -> 0 */
3696 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3697 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3698 && ! side_effects_p (op0)
3699 && GET_MODE_CLASS (mode) != MODE_CC)
3700 return CONST0_RTX (mode);
3702 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3703 there are no nonzero bits of C outside of X's mode. */
3704 if ((GET_CODE (op0) == SIGN_EXTEND
3705 || GET_CODE (op0) == ZERO_EXTEND)
3706 && CONST_INT_P (trueop1)
3707 && HWI_COMPUTABLE_MODE_P (mode)
3708 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3709 & UINTVAL (trueop1)) == 0)
3711 machine_mode imode = GET_MODE (XEXP (op0, 0));
3712 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3713 gen_int_mode (INTVAL (trueop1),
3714 imode));
3715 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3718 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3719 we might be able to further simplify the AND with X and potentially
3720 remove the truncation altogether. */
3721 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3723 rtx x = XEXP (op0, 0);
3724 machine_mode xmode = GET_MODE (x);
3725 tem = simplify_gen_binary (AND, xmode, x,
3726 gen_int_mode (INTVAL (trueop1), xmode));
3727 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3730 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3731 if (GET_CODE (op0) == IOR
3732 && CONST_INT_P (trueop1)
3733 && CONST_INT_P (XEXP (op0, 1)))
3735 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3736 return simplify_gen_binary (IOR, mode,
3737 simplify_gen_binary (AND, mode,
3738 XEXP (op0, 0), op1),
3739 gen_int_mode (tmp, mode));
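/* For example, (and (ior A (const_int 0x0c)) (const_int 0x0a)) becomes
   (ior (and A (const_int 0x0a)) (const_int 0x08)). */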
3742 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3743 insn (and may simplify more). */
3744 if (GET_CODE (op0) == XOR
3745 && rtx_equal_p (XEXP (op0, 0), op1)
3746 && ! side_effects_p (op1))
3747 return simplify_gen_binary (AND, mode,
3748 simplify_gen_unary (NOT, mode,
3749 XEXP (op0, 1), mode),
3750 op1);
3752 if (GET_CODE (op0) == XOR
3753 && rtx_equal_p (XEXP (op0, 1), op1)
3754 && ! side_effects_p (op1))
3755 return simplify_gen_binary (AND, mode,
3756 simplify_gen_unary (NOT, mode,
3757 XEXP (op0, 0), mode),
3758 op1);
3760 /* Similarly for (~(A ^ B)) & A. */
3761 if (GET_CODE (op0) == NOT
3762 && GET_CODE (XEXP (op0, 0)) == XOR
3763 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3764 && ! side_effects_p (op1))
3765 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3767 if (GET_CODE (op0) == NOT
3768 && GET_CODE (XEXP (op0, 0)) == XOR
3769 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3770 && ! side_effects_p (op1))
3771 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3773 /* Convert (A | B) & A to A. */
3774 if (GET_CODE (op0) == IOR
3775 && (rtx_equal_p (XEXP (op0, 0), op1)
3776 || rtx_equal_p (XEXP (op0, 1), op1))
3777 && ! side_effects_p (XEXP (op0, 0))
3778 && ! side_effects_p (XEXP (op0, 1)))
3779 return op1;
3781 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3782 ((A & N) + B) & M -> (A + B) & M
3783 Similarly if (N & M) == 0,
3784 ((A | N) + B) & M -> (A + B) & M
3785 and for - instead of + and/or ^ instead of |.
3786 Also, if (N & M) == 0, then
3787 (A +- N) & M -> A & M. */
3788 if (CONST_INT_P (trueop1)
3789 && HWI_COMPUTABLE_MODE_P (mode)
3790 && ~UINTVAL (trueop1)
3791 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3792 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3794 rtx pmop[2];
3795 int which;
3797 pmop[0] = XEXP (op0, 0);
3798 pmop[1] = XEXP (op0, 1);
3800 if (CONST_INT_P (pmop[1])
3801 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3802 return simplify_gen_binary (AND, mode, pmop[0], op1);
3804 for (which = 0; which < 2; which++)
3806 tem = pmop[which];
3807 switch (GET_CODE (tem))
3809 case AND:
3810 if (CONST_INT_P (XEXP (tem, 1))
3811 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3812 == UINTVAL (trueop1))
3813 pmop[which] = XEXP (tem, 0);
3814 break;
3815 case IOR:
3816 case XOR:
3817 if (CONST_INT_P (XEXP (tem, 1))
3818 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3819 pmop[which] = XEXP (tem, 0);
3820 break;
3821 default:
3822 break;
3826 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3828 tem = simplify_gen_binary (GET_CODE (op0), mode,
3829 pmop[0], pmop[1]);
3830 return simplify_gen_binary (code, mode, tem, op1);
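/* For example, with M == 0x0f and N == 0xff,
   (and (plus (and A (const_int 0xff)) B) (const_int 0x0f)) becomes
   (and (plus A B) (const_int 0x0f)), since bits above the mask cannot
   influence the masked low bits of the sum. */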
3834 /* (and X (ior (not X) Y)) -> (and X Y) */
3835 if (GET_CODE (op1) == IOR
3836 && GET_CODE (XEXP (op1, 0)) == NOT
3837 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3838 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3840 /* (and (ior (not X) Y) X) -> (and X Y) */
3841 if (GET_CODE (op0) == IOR
3842 && GET_CODE (XEXP (op0, 0)) == NOT
3843 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3844 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3846 /* (and X (ior Y (not X))) -> (and X Y) */
3847 if (GET_CODE (op1) == IOR
3848 && GET_CODE (XEXP (op1, 1)) == NOT
3849 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3850 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3852 /* (and (ior Y (not X)) X) -> (and X Y) */
3853 if (GET_CODE (op0) == IOR
3854 && GET_CODE (XEXP (op0, 1)) == NOT
3855 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3856 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3858 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3859 if (GET_CODE (op0) == GET_CODE (op1)
3860 && (GET_CODE (op0) == AND
3861 || GET_CODE (op0) == IOR
3862 || GET_CODE (op0) == LSHIFTRT
3863 || GET_CODE (op0) == ASHIFTRT
3864 || GET_CODE (op0) == ASHIFT
3865 || GET_CODE (op0) == ROTATE
3866 || GET_CODE (op0) == ROTATERT))
3868 tem = simplify_distributive_operation (code, mode, op0, op1);
3869 if (tem)
3870 return tem;
3873 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3874 if (tem)
3875 return tem;
3877 tem = simplify_associative_operation (code, mode, op0, op1);
3878 if (tem)
3879 return tem;
3880 break;
3882 case UDIV:
3883 /* 0/x is 0 (or x&0 if x has side-effects). */
3884 if (trueop0 == CONST0_RTX (mode)
3885 && !cfun->can_throw_non_call_exceptions)
3887 if (side_effects_p (op1))
3888 return simplify_gen_binary (AND, mode, op1, trueop0);
3889 return trueop0;
3891 /* x/1 is x. */
3892 if (trueop1 == CONST1_RTX (mode))
3894 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3895 if (tem)
3896 return tem;
3898 /* Convert divide by power of two into shift. */
3899 if (CONST_INT_P (trueop1)
3900 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3901 return simplify_gen_binary (LSHIFTRT, mode, op0,
3902 gen_int_shift_amount (mode, val));
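/* For example, (udiv X (const_int 16)) becomes
   (lshiftrt X (const_int 4)). */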
3903 break;
3905 case DIV:
3906 /* Handle floating point and integers separately. */
3907 if (SCALAR_FLOAT_MODE_P (mode))
3909 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3910 safe for modes with NaNs, since 0.0 / 0.0 will then be
3911 NaN rather than 0.0. Nor is it safe for modes with signed
3912 zeros, since dividing 0 by a negative number gives -0.0. */
3913 if (trueop0 == CONST0_RTX (mode)
3914 && !HONOR_NANS (mode)
3915 && !HONOR_SIGNED_ZEROS (mode)
3916 && ! side_effects_p (op1))
3917 return op0;
3918 /* x/1.0 is x. */
3919 if (trueop1 == CONST1_RTX (mode)
3920 && !HONOR_SNANS (mode))
3921 return op0;
3923 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3924 && trueop1 != CONST0_RTX (mode))
3926 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3928 /* x/-1.0 is -x. */
3929 if (real_equal (d1, &dconstm1)
3930 && !HONOR_SNANS (mode))
3931 return simplify_gen_unary (NEG, mode, op0, mode);
3933 /* Change FP division by a constant into multiplication.
3934 Only do this with -freciprocal-math. */
3935 if (flag_reciprocal_math
3936 && !real_equal (d1, &dconst0))
3938 REAL_VALUE_TYPE d;
3939 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3940 tem = const_double_from_real_value (d, mode);
3941 return simplify_gen_binary (MULT, mode, op0, tem);
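/* For example, with -freciprocal-math, (div X 4.0) becomes
   (mult X 0.25), since the reciprocal of 4.0 is exactly representable. */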
3945 else if (SCALAR_INT_MODE_P (mode))
3947 /* 0/x is 0 (or x&0 if x has side-effects). */
3948 if (trueop0 == CONST0_RTX (mode)
3949 && !cfun->can_throw_non_call_exceptions)
3951 if (side_effects_p (op1))
3952 return simplify_gen_binary (AND, mode, op1, trueop0);
3953 return trueop0;
3955 /* x/1 is x. */
3956 if (trueop1 == CONST1_RTX (mode))
3958 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3959 if (tem)
3960 return tem;
3962 /* x/-1 is -x. */
3963 if (trueop1 == constm1_rtx)
3965 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3966 if (x)
3967 return simplify_gen_unary (NEG, mode, x, mode);
3970 break;
3972 case UMOD:
3973 /* 0%x is 0 (or x&0 if x has side-effects). */
3974 if (trueop0 == CONST0_RTX (mode))
3976 if (side_effects_p (op1))
3977 return simplify_gen_binary (AND, mode, op1, trueop0);
3978 return trueop0;
3980 /* x%1 is 0 (or x&0 if x has side-effects). */
3981 if (trueop1 == CONST1_RTX (mode))
3983 if (side_effects_p (op0))
3984 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3985 return CONST0_RTX (mode);
3987 /* Implement modulus by power of two as AND. */
3988 if (CONST_INT_P (trueop1)
3989 && exact_log2 (UINTVAL (trueop1)) > 0)
3990 return simplify_gen_binary (AND, mode, op0,
3991 gen_int_mode (UINTVAL (trueop1) - 1,
3992 mode));
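/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)). */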
3993 break;
3995 case MOD:
3996 /* 0%x is 0 (or x&0 if x has side-effects). */
3997 if (trueop0 == CONST0_RTX (mode))
3999 if (side_effects_p (op1))
4000 return simplify_gen_binary (AND, mode, op1, trueop0);
4001 return trueop0;
4003 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
4004 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
4006 if (side_effects_p (op0))
4007 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4008 return CONST0_RTX (mode);
4010 break;
4012 case ROTATERT:
4013 case ROTATE:
4014 if (trueop1 == CONST0_RTX (mode))
4015 return op0;
4016 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
4017 prefer left rotation; if op1 is from bitsize / 2 + 1 to
4018 bitsize - 1, use the other direction of rotate with an amount of
4019 1 .. bitsize / 2 - 1 instead. */
4020 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
4021 if (CONST_INT_P (trueop1)
4022 && IN_RANGE (INTVAL (trueop1),
4023 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
4024 GET_MODE_UNIT_PRECISION (mode) - 1))
4026 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
4027 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
4028 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
4029 mode, op0, new_amount_rtx);
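/* For example, in SImode (rotatert X (const_int 31)) becomes
   (rotate X (const_int 1)). */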
4031 #endif
4032 /* FALLTHRU */
4033 case ASHIFTRT:
4034 if (trueop1 == CONST0_RTX (mode))
4035 return op0;
4036 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4037 return op0;
4038 /* Rotating ~0 always results in ~0. */
4039 if (CONST_INT_P (trueop0)
4040 && HWI_COMPUTABLE_MODE_P (mode)
4041 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4042 && ! side_effects_p (op1))
4043 return op0;
4045 canonicalize_shift:
4046 /* Given:
4047 scalar modes M1, M2
4048 scalar constants c1, c2
4049 size (M2) > size (M1)
4050 c1 == size (M2) - size (M1)
4051 optimize:
4052 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4053 <low_part>)
4054 (const_int <c2>))
4056 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4057 <low_part>). */
4058 if ((code == ASHIFTRT || code == LSHIFTRT)
4059 && is_a <scalar_int_mode> (mode, &int_mode)
4060 && SUBREG_P (op0)
4061 && CONST_INT_P (op1)
4062 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
4063 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
4064 &inner_mode)
4065 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
4066 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
4067 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
4068 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
4069 && subreg_lowpart_p (op0))
4071 rtx tmp = gen_int_shift_amount
4072 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
4074 /* Combine would usually zero out the value when combining two
4075 shifts whose combined count is larger than or equal to the mode's
4076 precision. However, since we fold away one of the shifts here,
4077 combine won't see it, so we must immediately zero the result if
4078 the combined count is out of range. */
4079 if (code == LSHIFTRT
4080 && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4081 tmp = const0_rtx;
4082 else
4083 tmp = simplify_gen_binary (code,
4084 inner_mode,
4085 XEXP (SUBREG_REG (op0), 0),
4086 tmp);
4088 return lowpart_subreg (int_mode, tmp, inner_mode);
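/* For example, with M2 == DImode, M1 == SImode and c1 == 32,
   (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32))
			   <low_part>)
		(const_int 5))
   becomes
   (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 37)) <low_part>). */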
4091 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4093 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4094 if (val != INTVAL (op1))
4095 return simplify_gen_binary (code, mode, op0,
4096 gen_int_shift_amount (mode, val));
4098 break;
4100 case SS_ASHIFT:
4101 if (CONST_INT_P (trueop0)
4102 && HWI_COMPUTABLE_MODE_P (mode)
4103 && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
4104 || mode_signbit_p (mode, trueop0))
4105 && ! side_effects_p (op1))
4106 return op0;
4107 goto simplify_ashift;
4109 case US_ASHIFT:
4110 if (CONST_INT_P (trueop0)
4111 && HWI_COMPUTABLE_MODE_P (mode)
4112 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4113 && ! side_effects_p (op1))
4114 return op0;
4115 /* FALLTHRU */
4117 case ASHIFT:
4118 simplify_ashift:
4119 if (trueop1 == CONST0_RTX (mode))
4120 return op0;
4121 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4122 return op0;
4123 if (mem_depth
4124 && code == ASHIFT
4125 && CONST_INT_P (trueop1)
4126 && is_a <scalar_int_mode> (mode, &int_mode)
4127 && IN_RANGE (UINTVAL (trueop1),
4128 1, GET_MODE_PRECISION (int_mode) - 1))
4130 auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4131 << UINTVAL (trueop1));
4132 rtx new_op1 = immed_wide_int_const (c, int_mode);
4133 return simplify_gen_binary (MULT, int_mode, op0, new_op1);
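/* For example, when simplifying inside an address (mem_depth > 0),
   (ashift:SI X (const_int 3)) becomes (mult:SI X (const_int 8)),
   the form expected inside a MEM. */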
4135 goto canonicalize_shift;
4137 case LSHIFTRT:
4138 if (trueop1 == CONST0_RTX (mode))
4139 return op0;
4140 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4141 return op0;
4142 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
4143 if (GET_CODE (op0) == CLZ
4144 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4145 && CONST_INT_P (trueop1)
4146 && STORE_FLAG_VALUE == 1
4147 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4149 unsigned HOST_WIDE_INT zero_val = 0;
4151 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4152 && zero_val == GET_MODE_PRECISION (inner_mode)
4153 && INTVAL (trueop1) == exact_log2 (zero_val))
4154 return simplify_gen_relational (EQ, mode, inner_mode,
4155 XEXP (op0, 0), const0_rtx);
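/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode, (lshiftrt (clz:SI X) (const_int 5)) becomes
   (eq X (const_int 0)), since only a zero input makes the CLZ result
   reach 32. */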
4157 goto canonicalize_shift;
4159 case SMIN:
4160 if (HWI_COMPUTABLE_MODE_P (mode)
4161 && mode_signbit_p (mode, trueop1)
4162 && ! side_effects_p (op0))
4163 return op1;
4164 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4165 return op0;
4166 tem = simplify_associative_operation (code, mode, op0, op1);
4167 if (tem)
4168 return tem;
4169 break;
4171 case SMAX:
4172 if (HWI_COMPUTABLE_MODE_P (mode)
4173 && CONST_INT_P (trueop1)
4174 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4175 && ! side_effects_p (op0))
4176 return op1;
4177 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4178 return op0;
4179 tem = simplify_associative_operation (code, mode, op0, op1);
4180 if (tem)
4181 return tem;
4182 break;
4184 case UMIN:
4185 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4186 return op1;
4187 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4188 return op0;
4189 tem = simplify_associative_operation (code, mode, op0, op1);
4190 if (tem)
4191 return tem;
4192 break;
4194 case UMAX:
4195 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4196 return op1;
4197 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4198 return op0;
4199 tem = simplify_associative_operation (code, mode, op0, op1);
4200 if (tem)
4201 return tem;
4202 break;
4204 case SS_PLUS:
4205 case US_PLUS:
4206 case SS_MINUS:
4207 case US_MINUS:
4208 /* Simplify x +/- 0 to x, if possible. */
4209 if (trueop1 == CONST0_RTX (mode))
4210 return op0;
4211 return 0;
4213 case SS_MULT:
4214 case US_MULT:
4215 /* Simplify x * 0 to 0, if possible. */
4216 if (trueop1 == CONST0_RTX (mode)
4217 && !side_effects_p (op0))
4218 return op1;
4220 /* Simplify x * 1 to x, if possible. */
4221 if (trueop1 == CONST1_RTX (mode))
4222 return op0;
4223 return 0;
4225 case SMUL_HIGHPART:
4226 case UMUL_HIGHPART:
4227 /* Simplify x * 0 to 0, if possible. */
4228 if (trueop1 == CONST0_RTX (mode)
4229 && !side_effects_p (op0))
4230 return op1;
4231 return 0;
4233 case SS_DIV:
4234 case US_DIV:
4235 /* Simplify x / 1 to x, if possible. */
4236 if (trueop1 == CONST1_RTX (mode))
4237 return op0;
4238 return 0;
4240 case VEC_SERIES:
4241 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4242 return gen_vec_duplicate (mode, op0);
4243 if (valid_for_const_vector_p (mode, op0)
4244 && valid_for_const_vector_p (mode, op1))
4245 return gen_const_vec_series (mode, op0, op1);
4246 return 0;
4248 case VEC_SELECT:
4249 if (!VECTOR_MODE_P (mode))
4251 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4252 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
4253 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4254 gcc_assert (XVECLEN (trueop1, 0) == 1);
4256 /* We can't reason about selections made at runtime. */
4257 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4258 return 0;
4260 if (vec_duplicate_p (trueop0, &elt0))
4261 return elt0;
4263 if (GET_CODE (trueop0) == CONST_VECTOR)
4264 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
4265 (trueop1, 0, 0)));
4267 /* Extract a scalar element from a nested VEC_SELECT expression
4268 (with optional nested VEC_CONCAT expression). Some targets
4269 (i386) extract scalar element from a vector using chain of
4270 nested VEC_SELECT expressions. When input operand is a memory
4271 operand, this operation can be simplified to a simple scalar
4272 load from an offset memory address. */
4273 int n_elts;
4274 if (GET_CODE (trueop0) == VEC_SELECT
4275 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4276 .is_constant (&n_elts)))
4278 rtx op0 = XEXP (trueop0, 0);
4279 rtx op1 = XEXP (trueop0, 1);
4281 int i = INTVAL (XVECEXP (trueop1, 0, 0));
4282 int elem;
4284 rtvec vec;
4285 rtx tmp_op, tmp;
4287 gcc_assert (GET_CODE (op1) == PARALLEL);
4288 gcc_assert (i < n_elts);
4290 /* Select element, pointed by nested selector. */
4291 elem = INTVAL (XVECEXP (op1, 0, i));
4293 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4294 if (GET_CODE (op0) == VEC_CONCAT)
4296 rtx op00 = XEXP (op0, 0);
4297 rtx op01 = XEXP (op0, 1);
4299 machine_mode mode00, mode01;
4300 int n_elts00, n_elts01;
4302 mode00 = GET_MODE (op00);
4303 mode01 = GET_MODE (op01);
4305 /* Find out the number of elements of each operand.
4306 Since the concatenated result has a constant number
4307 of elements, the operands must too. */
4308 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4309 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4311 gcc_assert (n_elts == n_elts00 + n_elts01);
4313 /* Select correct operand of VEC_CONCAT
4314 and adjust selector. */
4315 if (elem < n_elts01)
4316 tmp_op = op00;
4317 else
4319 tmp_op = op01;
4320 elem -= n_elts00;
4323 else
4324 tmp_op = op0;
4326 vec = rtvec_alloc (1);
4327 RTVEC_ELT (vec, 0) = GEN_INT (elem);
4329 tmp = gen_rtx_fmt_ee (code, mode,
4330 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4331 return tmp;
4334 else
4336 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4337 gcc_assert (GET_MODE_INNER (mode)
4338 == GET_MODE_INNER (GET_MODE (trueop0)));
4339 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4341 if (vec_duplicate_p (trueop0, &elt0))
4342 /* It doesn't matter which elements are selected by trueop1,
4343 because they are all the same. */
4344 return gen_vec_duplicate (mode, elt0);
4346 if (GET_CODE (trueop0) == CONST_VECTOR)
4348 unsigned n_elts = XVECLEN (trueop1, 0);
4349 rtvec v = rtvec_alloc (n_elts);
4350 unsigned int i;
4352 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4353 for (i = 0; i < n_elts; i++)
4355 rtx x = XVECEXP (trueop1, 0, i);
4357 if (!CONST_INT_P (x))
4358 return 0;
4360 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4361 INTVAL (x));
4364 return gen_rtx_CONST_VECTOR (mode, v);
4367 /* Recognize the identity. */
4368 if (GET_MODE (trueop0) == mode)
4370 bool maybe_ident = true;
4371 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4373 rtx j = XVECEXP (trueop1, 0, i);
4374 if (!CONST_INT_P (j) || INTVAL (j) != i)
4376 maybe_ident = false;
4377 break;
4380 if (maybe_ident)
4381 return trueop0;
4384 /* If we select a low-part subreg, return that. */
4385 if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
4387 rtx new_rtx = lowpart_subreg (mode, trueop0,
4388 GET_MODE (trueop0));
4389 if (new_rtx != NULL_RTX)
4390 return new_rtx;
4393 /* If we build {a,b} then permute it, build the result directly. */
4394 if (XVECLEN (trueop1, 0) == 2
4395 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4396 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4397 && GET_CODE (trueop0) == VEC_CONCAT
4398 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4399 && GET_MODE (XEXP (trueop0, 0)) == mode
4400 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4401 && GET_MODE (XEXP (trueop0, 1)) == mode)
4403 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4404 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4405 rtx subop0, subop1;
4407 gcc_assert (i0 < 4 && i1 < 4);
4408 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4409 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4411 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4414 if (XVECLEN (trueop1, 0) == 2
4415 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4416 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4417 && GET_CODE (trueop0) == VEC_CONCAT
4418 && GET_MODE (trueop0) == mode)
4420 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4421 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4422 rtx subop0, subop1;
4424 gcc_assert (i0 < 2 && i1 < 2);
4425 subop0 = XEXP (trueop0, i0);
4426 subop1 = XEXP (trueop0, i1);
4428 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
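/* For example, (vec_select:V2SI (vec_concat:V2SI A B)
				 (parallel [(const_int 1) (const_int 0)]))
   becomes (vec_concat:V2SI B A). */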
4431 /* If we select one half of a vec_concat, return that. */
4432 int l0, l1;
4433 if (GET_CODE (trueop0) == VEC_CONCAT
4434 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4435 .is_constant (&l0))
4436 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4437 .is_constant (&l1))
4438 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4440 rtx subop0 = XEXP (trueop0, 0);
4441 rtx subop1 = XEXP (trueop0, 1);
4442 machine_mode mode0 = GET_MODE (subop0);
4443 machine_mode mode1 = GET_MODE (subop1);
4444 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4445 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4447 bool success = true;
4448 for (int i = 1; i < l0; ++i)
4450 rtx j = XVECEXP (trueop1, 0, i);
4451 if (!CONST_INT_P (j) || INTVAL (j) != i)
4453 success = false;
4454 break;
4457 if (success)
4458 return subop0;
4460 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4462 bool success = true;
4463 for (int i = 1; i < l1; ++i)
4465 rtx j = XVECEXP (trueop1, 0, i);
4466 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4468 success = false;
4469 break;
4472 if (success)
4473 return subop1;
4477 /* Simplify vec_select of a subreg of X to just a vec_select of X
4478 when X has the same component mode as the vec_select. */
4479 unsigned HOST_WIDE_INT subreg_offset = 0;
4480 if (GET_CODE (trueop0) == SUBREG
4481 && GET_MODE_INNER (mode)
4482 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
4483 && GET_MODE_NUNITS (mode).is_constant (&l1)
4484 && constant_multiple_p (subreg_memory_offset (trueop0),
4485 GET_MODE_UNIT_BITSIZE (mode),
4486 &subreg_offset))
4488 poly_uint64 nunits
4489 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
4490 bool success = true;
4491 for (int i = 0; i != l1; i++)
4493 rtx idx = XVECEXP (trueop1, 0, i);
4494 if (!CONST_INT_P (idx)
4495 || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
4497 success = false;
4498 break;
4502 if (success)
4504 rtx par = trueop1;
4505 if (subreg_offset)
4507 rtvec vec = rtvec_alloc (l1);
4508 for (int i = 0; i < l1; i++)
4509 RTVEC_ELT (vec, i)
4510 = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
4511 + subreg_offset);
4512 par = gen_rtx_PARALLEL (VOIDmode, vec);
4514 return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
4519 if (XVECLEN (trueop1, 0) == 1
4520 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4521 && GET_CODE (trueop0) == VEC_CONCAT)
4523 rtx vec = trueop0;
4524 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4526 /* Try to find the element in the VEC_CONCAT. */
4527 while (GET_MODE (vec) != mode
4528 && GET_CODE (vec) == VEC_CONCAT)
4530 poly_int64 vec_size;
4532 if (CONST_INT_P (XEXP (vec, 0)))
4534 /* vec_concat of two const_ints doesn't make sense with
4535 respect to modes. */
4536 if (CONST_INT_P (XEXP (vec, 1)))
4537 return 0;
4539 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4540 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4542 else
4543 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4545 if (known_lt (offset, vec_size))
4546 vec = XEXP (vec, 0);
4547 else if (known_ge (offset, vec_size))
4549 offset -= vec_size;
4550 vec = XEXP (vec, 1);
4552 else
4553 break;
4554 vec = avoid_constant_pool_reference (vec);
4557 if (GET_MODE (vec) == mode)
4558 return vec;
4561 /* If we select elements in a vec_merge that all come from the same
4562 operand, select from that operand directly. */
4563 if (GET_CODE (op0) == VEC_MERGE)
4565 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4566 if (CONST_INT_P (trueop02))
4568 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4569 bool all_operand0 = true;
4570 bool all_operand1 = true;
4571 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4573 rtx j = XVECEXP (trueop1, 0, i);
4574 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4575 all_operand1 = false;
4576 else
4577 all_operand0 = false;
4579 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4580 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4581 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4582 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4586 /* If we have two nested selects that are inverses of each
4587 other, replace them with the source operand. */
4588 if (GET_CODE (trueop0) == VEC_SELECT
4589 && GET_MODE (XEXP (trueop0, 0)) == mode)
4591 rtx op0_subop1 = XEXP (trueop0, 1);
4592 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4593 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4595 /* Apply the outer ordering vector to the inner one. (The inner
4596 ordering vector is expressly permitted to be of a different
4597 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4598 then the two VEC_SELECTs cancel. */
4599 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4601 rtx x = XVECEXP (trueop1, 0, i);
4602 if (!CONST_INT_P (x))
4603 return 0;
4604 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4605 if (!CONST_INT_P (y) || i != INTVAL (y))
4606 return 0;
4608 return XEXP (trueop0, 0);
4611 return 0;
4612 case VEC_CONCAT:
4614 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4615 ? GET_MODE (trueop0)
4616 : GET_MODE_INNER (mode));
4617 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4618 ? GET_MODE (trueop1)
4619 : GET_MODE_INNER (mode));
4621 gcc_assert (VECTOR_MODE_P (mode));
4622 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4623 + GET_MODE_SIZE (op1_mode),
4624 GET_MODE_SIZE (mode)));
4626 if (VECTOR_MODE_P (op0_mode))
4627 gcc_assert (GET_MODE_INNER (mode)
4628 == GET_MODE_INNER (op0_mode));
4629 else
4630 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4632 if (VECTOR_MODE_P (op1_mode))
4633 gcc_assert (GET_MODE_INNER (mode)
4634 == GET_MODE_INNER (op1_mode));
4635 else
4636 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4638 unsigned int n_elts, in_n_elts;
4639 if ((GET_CODE (trueop0) == CONST_VECTOR
4640 || CONST_SCALAR_INT_P (trueop0)
4641 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4642 && (GET_CODE (trueop1) == CONST_VECTOR
4643 || CONST_SCALAR_INT_P (trueop1)
4644 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
4645 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4646 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4648 rtvec v = rtvec_alloc (n_elts);
4649 unsigned int i;
4650 for (i = 0; i < n_elts; i++)
4652 if (i < in_n_elts)
4654 if (!VECTOR_MODE_P (op0_mode))
4655 RTVEC_ELT (v, i) = trueop0;
4656 else
4657 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4659 else
4661 if (!VECTOR_MODE_P (op1_mode))
4662 RTVEC_ELT (v, i) = trueop1;
4663 else
4664 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4665 i - in_n_elts);
4669 return gen_rtx_CONST_VECTOR (mode, v);
4672 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4673 Restrict the transformation to avoid generating a VEC_SELECT with a
4674 mode unrelated to its operand. */
4675 if (GET_CODE (trueop0) == VEC_SELECT
4676 && GET_CODE (trueop1) == VEC_SELECT
4677 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4678 && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
4679 == GET_MODE_INNER(mode))
4681 rtx par0 = XEXP (trueop0, 1);
4682 rtx par1 = XEXP (trueop1, 1);
4683 int len0 = XVECLEN (par0, 0);
4684 int len1 = XVECLEN (par1, 0);
4685 rtvec vec = rtvec_alloc (len0 + len1);
4686 for (int i = 0; i < len0; i++)
4687 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4688 for (int i = 0; i < len1; i++)
4689 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4690 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4691 gen_rtx_PARALLEL (VOIDmode, vec));
4694 return 0;
4696 default:
4697 gcc_unreachable ();
4700 if (mode == GET_MODE (op0)
4701 && mode == GET_MODE (op1)
4702 && vec_duplicate_p (op0, &elt0)
4703 && vec_duplicate_p (op1, &elt1))
4705 /* Try applying the operator to ELT and see if that simplifies.
4706 We can duplicate the result if so.
4708 The reason we don't use simplify_gen_binary is that it isn't
4709 necessarily a win to convert things like:
4711 (plus:V (vec_duplicate:V (reg:S R1))
4712 (vec_duplicate:V (reg:S R2)))
4716 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4718 The first might be done entirely in vector registers while the
4719 second might need a move between register files. */
4720 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4721 elt0, elt1);
4722 if (tem)
4723 return gen_vec_duplicate (mode, tem);
4726 return 0;
4729 /* Return true if binary operation OP distributes over addition in operand
4730 OPNO, with the other operand being held constant. OPNO counts from 1. */
4732 static bool
4733 distributes_over_addition_p (rtx_code op, int opno)
4735 switch (op)
4737 case PLUS:
4738 case MINUS:
4739 case MULT:
4740 return true;
4742 case ASHIFT:
4743 return opno == 1;
4745 default:
4746 return false;
4751 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4752 rtx op0, rtx op1)
4754 if (VECTOR_MODE_P (mode)
4755 && code != VEC_CONCAT
4756 && GET_CODE (op0) == CONST_VECTOR
4757 && GET_CODE (op1) == CONST_VECTOR)
4759 bool step_ok_p;
4760 if (CONST_VECTOR_STEPPED_P (op0)
4761 && CONST_VECTOR_STEPPED_P (op1))
4762 /* We can operate directly on the encoding if:
4764 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4765 implies
4766 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4768 Addition and subtraction are the supported operators
4769 for which this is true. */
4770 step_ok_p = (code == PLUS || code == MINUS);
4771 else if (CONST_VECTOR_STEPPED_P (op0))
4772 /* We can operate directly on stepped encodings if:
4774 a3 - a2 == a2 - a1
4775 implies:
4776 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4778 which is true if (x -> x op c) distributes over addition. */
4779 step_ok_p = distributes_over_addition_p (code, 1);
4780 else
4781 /* Similarly in reverse. */
4782 step_ok_p = distributes_over_addition_p (code, 2);
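/* For example, ASHIFT distributes over addition in operand 1:
   (a + b) << c == (a << c) + (b << c) in modular arithmetic, whereas
   c << (a + b) is not in general (c << a) + (c << b), so a stepped
   shift amount does not keep the result stepped. */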
4783 rtx_vector_builder builder;
4784 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4785 return 0;
4787 unsigned int count = builder.encoded_nelts ();
4788 for (unsigned int i = 0; i < count; i++)
4790 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4791 CONST_VECTOR_ELT (op0, i),
4792 CONST_VECTOR_ELT (op1, i));
4793 if (!x || !valid_for_const_vector_p (mode, x))
4794 return 0;
4795 builder.quick_push (x);
4797 return builder.build ();
4800 if (VECTOR_MODE_P (mode)
4801 && code == VEC_CONCAT
4802 && (CONST_SCALAR_INT_P (op0)
4803 || CONST_FIXED_P (op0)
4804 || CONST_DOUBLE_AS_FLOAT_P (op0))
4805 && (CONST_SCALAR_INT_P (op1)
4806 || CONST_DOUBLE_AS_FLOAT_P (op1)
4807 || CONST_FIXED_P (op1)))
4809 /* Both inputs have a constant number of elements, so the result
4810 must too. */
4811 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4812 rtvec v = rtvec_alloc (n_elts);
4814 gcc_assert (n_elts >= 2);
4815 if (n_elts == 2)
4817 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4818 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4820 RTVEC_ELT (v, 0) = op0;
4821 RTVEC_ELT (v, 1) = op1;
4823 else
4825 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4826 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4827 unsigned i;
4829 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4830 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4831 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4833 for (i = 0; i < op0_n_elts; ++i)
4834 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4835 for (i = 0; i < op1_n_elts; ++i)
4836 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4839 return gen_rtx_CONST_VECTOR (mode, v);
4842 if (SCALAR_FLOAT_MODE_P (mode)
4843 && CONST_DOUBLE_AS_FLOAT_P (op0)
4844 && CONST_DOUBLE_AS_FLOAT_P (op1)
4845 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4847 if (code == AND
4848 || code == IOR
4849 || code == XOR)
4851 long tmp0[4];
4852 long tmp1[4];
4853 REAL_VALUE_TYPE r;
4854 int i;
4856 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4857 GET_MODE (op0));
4858 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4859 GET_MODE (op1));
4860 for (i = 0; i < 4; i++)
4862 switch (code)
4864 case AND:
4865 tmp0[i] &= tmp1[i];
4866 break;
4867 case IOR:
4868 tmp0[i] |= tmp1[i];
4869 break;
4870 case XOR:
4871 tmp0[i] ^= tmp1[i];
4872 break;
4873 default:
4874 gcc_unreachable ();
4877 real_from_target (&r, tmp0, mode);
4878 return const_double_from_real_value (r, mode);
4880 else
4882 REAL_VALUE_TYPE f0, f1, value, result;
4883 const REAL_VALUE_TYPE *opr0, *opr1;
4884 bool inexact;
4886 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4887 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4889 if (HONOR_SNANS (mode)
4890 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4891 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4892 return 0;
4894 real_convert (&f0, mode, opr0);
4895 real_convert (&f1, mode, opr1);
4897 if (code == DIV
4898 && real_equal (&f1, &dconst0)
4899 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4900 return 0;
4902 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4903 && flag_trapping_math
4904 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4906 int s0 = REAL_VALUE_NEGATIVE (f0);
4907 int s1 = REAL_VALUE_NEGATIVE (f1);
4909 switch (code)
4911 case PLUS:
4912 /* Inf + -Inf = NaN plus exception. */
4913 if (s0 != s1)
4914 return 0;
4915 break;
4916 case MINUS:
4917 /* Inf - Inf = NaN plus exception. */
4918 if (s0 == s1)
4919 return 0;
4920 break;
4921 case DIV:
4922 /* Inf / Inf = NaN plus exception. */
4923 return 0;
4924 default:
4925 break;
4929 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4930 && flag_trapping_math
4931 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4932 || (REAL_VALUE_ISINF (f1)
4933 && real_equal (&f0, &dconst0))))
4934 /* Inf * 0 = NaN plus exception. */
4935 return 0;
4937 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4938 &f0, &f1);
4939 real_convert (&result, mode, &value);
4941 /* Don't constant fold this floating point operation if
4942 the result has overflowed and flag_trapping_math is set. */
4944 if (flag_trapping_math
4945 && MODE_HAS_INFINITIES (mode)
4946 && REAL_VALUE_ISINF (result)
4947 && !REAL_VALUE_ISINF (f0)
4948 && !REAL_VALUE_ISINF (f1))
4949 /* Overflow plus exception. */
4950 return 0;
4952 /* Don't constant fold this floating point operation if the
4953 result may depend upon the run-time rounding mode and
4954 flag_rounding_math is set, or if GCC's software emulation
4955 is unable to accurately represent the result. */
4957 if ((flag_rounding_math
4958 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4959 && (inexact || !real_identical (&result, &value)))
4960 return NULL_RTX;
4962 return const_double_from_real_value (result, mode);
4966 /* We can fold some multi-word operations. */
4967 scalar_int_mode int_mode;
4968 if (is_a <scalar_int_mode> (mode, &int_mode)
4969 && CONST_SCALAR_INT_P (op0)
4970 && CONST_SCALAR_INT_P (op1)
4971 && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4973 wide_int result;
4974 wi::overflow_type overflow;
4975 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4976 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4978 #if TARGET_SUPPORTS_WIDE_INT == 0
4979 /* This assert keeps the simplification from producing a result
4980 that cannot be represented in a CONST_DOUBLE but a lot of
4981 upstream callers expect that this function never fails to
4982 simplify something, and so if you added this to the test
4983 above the code would die later anyway. If this assert
4984 happens, you just need to make the port support wide int. */
4985 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4986 #endif
4987 switch (code)
4989 case MINUS:
4990 result = wi::sub (pop0, pop1);
4991 break;
4993 case PLUS:
4994 result = wi::add (pop0, pop1);
4995 break;
4997 case MULT:
4998 result = wi::mul (pop0, pop1);
4999 break;
5001 case DIV:
5002 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
5003 if (overflow)
5004 return NULL_RTX;
5005 break;
5007 case MOD:
5008 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
5009 if (overflow)
5010 return NULL_RTX;
5011 break;
5013 case UDIV:
5014 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
5015 if (overflow)
5016 return NULL_RTX;
5017 break;
5019 case UMOD:
5020 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
5021 if (overflow)
5022 return NULL_RTX;
5023 break;
5025 case AND:
5026 result = wi::bit_and (pop0, pop1);
5027 break;
5029 case IOR:
5030 result = wi::bit_or (pop0, pop1);
5031 break;
5033 case XOR:
5034 result = wi::bit_xor (pop0, pop1);
5035 break;
5037 case SMIN:
5038 result = wi::smin (pop0, pop1);
5039 break;
5041 case SMAX:
5042 result = wi::smax (pop0, pop1);
5043 break;
5045 case UMIN:
5046 result = wi::umin (pop0, pop1);
5047 break;
5049 case UMAX:
5050 result = wi::umax (pop0, pop1);
5051 break;
5053 case LSHIFTRT:
5054 case ASHIFTRT:
5055 case ASHIFT:
5056 case SS_ASHIFT:
5057 case US_ASHIFT:
5059 wide_int wop1 = pop1;
5060 if (SHIFT_COUNT_TRUNCATED)
5061 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
5062 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
5063 return NULL_RTX;
5065 switch (code)
5067 case LSHIFTRT:
5068 result = wi::lrshift (pop0, wop1);
5069 break;
5071 case ASHIFTRT:
5072 result = wi::arshift (pop0, wop1);
5073 break;
5075 case ASHIFT:
5076 result = wi::lshift (pop0, wop1);
5077 break;
5079 case SS_ASHIFT:
5080 if (wi::leu_p (wop1, wi::clrsb (pop0)))
5081 result = wi::lshift (pop0, wop1);
5082 else if (wi::neg_p (pop0))
5083 result = wi::min_value (int_mode, SIGNED);
5084 else
5085 result = wi::max_value (int_mode, SIGNED);
5086 break;
5088 case US_ASHIFT:
5089 if (wi::eq_p (pop0, 0))
5090 result = pop0;
5091 else if (wi::leu_p (wop1, wi::clz (pop0)))
5092 result = wi::lshift (pop0, wop1);
5093 else
5094 result = wi::max_value (int_mode, UNSIGNED);
5095 break;
5097 default:
5098 gcc_unreachable ();
5100 break;
5102 case ROTATE:
5103 case ROTATERT:
5105 if (wi::neg_p (pop1))
5106 return NULL_RTX;
5108 switch (code)
5110 case ROTATE:
5111 result = wi::lrotate (pop0, pop1);
5112 break;
5114 case ROTATERT:
5115 result = wi::rrotate (pop0, pop1);
5116 break;
5118 default:
5119 gcc_unreachable ();
5121 break;
5124 case SS_PLUS:
5125 result = wi::add (pop0, pop1, SIGNED, &overflow);
5126 clamp_signed_saturation:
5127 if (overflow == wi::OVF_OVERFLOW)
5128 result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
5129 else if (overflow == wi::OVF_UNDERFLOW)
5130 result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
5131 else if (overflow != wi::OVF_NONE)
5132 return NULL_RTX;
5133 break;
5135 case US_PLUS:
5136 result = wi::add (pop0, pop1, UNSIGNED, &overflow);
5137 clamp_unsigned_saturation:
5138 if (overflow != wi::OVF_NONE)
5139 result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5140 break;
5142 case SS_MINUS:
5143 result = wi::sub (pop0, pop1, SIGNED, &overflow);
5144 goto clamp_signed_saturation;
5146 case US_MINUS:
5147 result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
5148 if (overflow != wi::OVF_NONE)
5149 result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5150 break;
5152 case SS_MULT:
5153 result = wi::mul (pop0, pop1, SIGNED, &overflow);
5154 goto clamp_signed_saturation;
5156 case US_MULT:
5157 result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
5158 goto clamp_unsigned_saturation;
5160 case SMUL_HIGHPART:
5161 result = wi::mul_high (pop0, pop1, SIGNED);
5162 break;
5164 case UMUL_HIGHPART:
5165 result = wi::mul_high (pop0, pop1, UNSIGNED);
5166 break;
5168 default:
5169 return NULL_RTX;
5171 return immed_wide_int_const (result, int_mode);
5174 /* Handle polynomial integers. */
5175 if (NUM_POLY_INT_COEFFS > 1
5176 && is_a <scalar_int_mode> (mode, &int_mode)
5177 && poly_int_rtx_p (op0)
5178 && poly_int_rtx_p (op1))
5180 poly_wide_int result;
5181 switch (code)
5183 case PLUS:
5184 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
5185 break;
5187 case MINUS:
5188 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
5189 break;
5191 case MULT:
5192 if (CONST_SCALAR_INT_P (op1))
5193 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
5194 else
5195 return NULL_RTX;
5196 break;
5198 case ASHIFT:
5199 if (CONST_SCALAR_INT_P (op1))
5201 wide_int shift = rtx_mode_t (op1, mode);
5202 if (SHIFT_COUNT_TRUNCATED)
5203 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
5204 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
5205 return NULL_RTX;
5206 result = wi::to_poly_wide (op0, mode) << shift;
5208 else
5209 return NULL_RTX;
5210 break;
5212 case IOR:
5213 if (!CONST_SCALAR_INT_P (op1)
5214 || !can_ior_p (wi::to_poly_wide (op0, mode),
5215 rtx_mode_t (op1, mode), &result))
5216 return NULL_RTX;
5217 break;
5219 default:
5220 return NULL_RTX;
5222 return immed_wide_int_const (result, int_mode);
5225 return NULL_RTX;
5230 /* Return a positive integer if X should sort after Y. The value
5231 returned is 1 if and only if X and Y are both regs. */
5233 static int
5234 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
5236 int result;
5238 result = (commutative_operand_precedence (y)
5239 - commutative_operand_precedence (x));
5240 if (result)
5241 return result + result;
5243 /* Group together equal REGs to do more simplification. */
5244 if (REG_P (x) && REG_P (y))
5245 return REGNO (x) > REGNO (y);
5247 return 0;
5250 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5251 operands may be another PLUS or MINUS.
5253 Rather than test for specific cases, we do this by a brute-force method
5254 and do all possible simplifications until no more changes occur. Then
5255 we rebuild the operation.
5257 May return NULL_RTX when no changes were made. */
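/* For example, (plus (minus (reg A) (reg B)) (plus (reg B) (const_int 4)))
   is flattened to the operands A, -B, B and 4; B and -B cancel, leaving
   (plus (reg A) (const_int 4)). */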
5260 simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
5261 rtx op0, rtx op1)
5263 struct simplify_plus_minus_op_data
5265 rtx op;
5266 short neg;
5267 } ops[16];
5268 rtx result, tem;
5269 int n_ops = 2;
5270 int changed, n_constants, canonicalized = 0;
5271 int i, j;
5273 memset (ops, 0, sizeof ops);
5275 /* Set up the two operands and then expand them until nothing has been
5276 changed. If we run out of room in our array, give up; this should
5277 almost never happen. */
5279 ops[0].op = op0;
5280 ops[0].neg = 0;
5281 ops[1].op = op1;
5282 ops[1].neg = (code == MINUS);
5286 changed = 0;
5287 n_constants = 0;
5289 for (i = 0; i < n_ops; i++)
5291 rtx this_op = ops[i].op;
5292 int this_neg = ops[i].neg;
5293 enum rtx_code this_code = GET_CODE (this_op);
5295 switch (this_code)
5297 case PLUS:
5298 case MINUS:
5299 if (n_ops == ARRAY_SIZE (ops))
5300 return NULL_RTX;
5302 ops[n_ops].op = XEXP (this_op, 1);
5303 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
5304 n_ops++;
5306 ops[i].op = XEXP (this_op, 0);
5307 changed = 1;
5308 /* If this operand was negated then we will potentially
5309 canonicalize the expression. Similarly if we don't
5310 place the operands adjacent we're re-ordering the
5311 expression and thus might be performing a
5312 canonicalization. Ignore register re-ordering.
5313 ??? It might be better to shuffle the ops array here,
5314 but then (plus (plus (A, B), plus (C, D))) wouldn't
5315 be seen as non-canonical. */
5316 if (this_neg
5317 || (i != n_ops - 2
5318 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
5319 canonicalized = 1;
5320 break;
5322 case NEG:
5323 ops[i].op = XEXP (this_op, 0);
5324 ops[i].neg = ! this_neg;
5325 changed = 1;
5326 canonicalized = 1;
5327 break;
5329 case CONST:
5330 if (n_ops != ARRAY_SIZE (ops)
5331 && GET_CODE (XEXP (this_op, 0)) == PLUS
5332 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
5333 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
5335 ops[i].op = XEXP (XEXP (this_op, 0), 0);
5336 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
5337 ops[n_ops].neg = this_neg;
5338 n_ops++;
5339 changed = 1;
5340 canonicalized = 1;
5342 break;
5344 case NOT:
5345 /* ~a -> (-a - 1) */
5346 if (n_ops != ARRAY_SIZE (ops))
5348 ops[n_ops].op = CONSTM1_RTX (mode);
5349 ops[n_ops++].neg = this_neg;
5350 ops[i].op = XEXP (this_op, 0);
5351 ops[i].neg = !this_neg;
5352 changed = 1;
5353 canonicalized = 1;
5355 break;
5357 CASE_CONST_SCALAR_INT:
5358 case CONST_POLY_INT:
5359 n_constants++;
5360 if (this_neg)
5362 ops[i].op = neg_poly_int_rtx (mode, this_op);
5363 ops[i].neg = 0;
5364 changed = 1;
5365 canonicalized = 1;
5367 break;
5369 default:
5370 break;
5374 while (changed);
5376 if (n_constants > 1)
5377 canonicalized = 1;
5379 gcc_assert (n_ops >= 2);
5381 /* If we only have two operands, we can avoid the loops. */
5382 if (n_ops == 2)
5384 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
5385 rtx lhs, rhs;
5387 /* Get the two operands. Be careful with the order, especially for
5388 the cases where code == MINUS. */
5389 if (ops[0].neg && ops[1].neg)
5391 lhs = gen_rtx_NEG (mode, ops[0].op);
5392 rhs = ops[1].op;
5394 else if (ops[0].neg)
5396 lhs = ops[1].op;
5397 rhs = ops[0].op;
5399 else
5401 lhs = ops[0].op;
5402 rhs = ops[1].op;
5405 return simplify_const_binary_operation (code, mode, lhs, rhs);
5408 /* Now simplify each pair of operands until nothing changes. */
5409 while (1)
5411 /* Insertion sort is good enough for a small array. */
5412 for (i = 1; i < n_ops; i++)
5414 struct simplify_plus_minus_op_data save;
5415 int cmp;
5417 j = i - 1;
5418 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5419 if (cmp <= 0)
5420 continue;
5421 /* Just swapping registers doesn't count as canonicalization. */
5422 if (cmp != 1)
5423 canonicalized = 1;
5425 save = ops[i];
5427 ops[j + 1] = ops[j];
5428 while (j--
5429 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5430 ops[j + 1] = save;
5433 changed = 0;
5434 for (i = n_ops - 1; i > 0; i--)
5435 for (j = i - 1; j >= 0; j--)
5437 rtx lhs = ops[j].op, rhs = ops[i].op;
5438 int lneg = ops[j].neg, rneg = ops[i].neg;
5440 if (lhs != 0 && rhs != 0)
5442 enum rtx_code ncode = PLUS;
5444 if (lneg != rneg)
5446 ncode = MINUS;
5447 if (lneg)
5448 std::swap (lhs, rhs);
5450 else if (swap_commutative_operands_p (lhs, rhs))
5451 std::swap (lhs, rhs);
5453 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5454 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5456 rtx tem_lhs, tem_rhs;
5458 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5459 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5460 tem = simplify_binary_operation (ncode, mode, tem_lhs,
5461 tem_rhs);
5463 if (tem && !CONSTANT_P (tem))
5464 tem = gen_rtx_CONST (GET_MODE (tem), tem);
5466 else
5467 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5469 if (tem)
5471 /* Reject "simplifications" that just wrap the two
5472 arguments in a CONST. Failure to do so can result
5473 in infinite recursion with simplify_binary_operation
5474 when it calls us to simplify CONST operations.
5475 Also, if we find such a simplification, don't try
5476 any more combinations with this rhs: We must have
5477 something like symbol+offset, ie. one of the
5478 trivial CONST expressions we handle later. */
5479 if (GET_CODE (tem) == CONST
5480 && GET_CODE (XEXP (tem, 0)) == ncode
5481 && XEXP (XEXP (tem, 0), 0) == lhs
5482 && XEXP (XEXP (tem, 0), 1) == rhs)
5483 break;
5484 lneg &= rneg;
5485 if (GET_CODE (tem) == NEG)
5486 tem = XEXP (tem, 0), lneg = !lneg;
5487 if (poly_int_rtx_p (tem) && lneg)
5488 tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5490 ops[i].op = tem;
5491 ops[i].neg = lneg;
5492 ops[j].op = NULL_RTX;
5493 changed = 1;
5494 canonicalized = 1;
5499 if (!changed)
5500 break;
5502 /* Pack all the operands to the lower-numbered entries. */
5503 for (i = 0, j = 0; j < n_ops; j++)
5504 if (ops[j].op)
5506 ops[i] = ops[j];
5507 i++;
5509 n_ops = i;
5512 /* If nothing changed, check that rematerialization of rtl instructions
5513 is still required. */
5514 if (!canonicalized)
5516 /* Perform rematerialization only if all operands are registers and
5517 all operations are PLUS. */
5518 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5519 around rs6000 and how it uses the CA register. See PR67145. */
5520 for (i = 0; i < n_ops; i++)
5521 if (ops[i].neg
5522 || !REG_P (ops[i].op)
5523 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5524 && fixed_regs[REGNO (ops[i].op)]
5525 && !global_regs[REGNO (ops[i].op)]
5526 && ops[i].op != frame_pointer_rtx
5527 && ops[i].op != arg_pointer_rtx
5528 && ops[i].op != stack_pointer_rtx))
5529 return NULL_RTX;
5530 goto gen_result;
5533 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5534 if (n_ops == 2
5535 && CONST_INT_P (ops[1].op)
5536 && CONSTANT_P (ops[0].op)
5537 && ops[0].neg)
5538 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5540 /* We suppressed creation of trivial CONST expressions in the
5541 combination loop to avoid recursion. Create one manually now.
5542 The combination loop should have ensured that there is exactly
5543 one CONST_INT, and the sort will have ensured that it is last
5544 in the array and that any other constant will be next-to-last. */
5546 if (n_ops > 1
5547 && poly_int_rtx_p (ops[n_ops - 1].op)
5548 && CONSTANT_P (ops[n_ops - 2].op))
5550 rtx value = ops[n_ops - 1].op;
5551 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5552 value = neg_poly_int_rtx (mode, value);
5553 if (CONST_INT_P (value))
5555 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5556 INTVAL (value));
5557 n_ops--;
5561 /* Put a non-negated operand first, if possible. */
5563 for (i = 0; i < n_ops && ops[i].neg; i++)
5564 continue;
5565 if (i == n_ops)
5566 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5567 else if (i != 0)
5569 tem = ops[0].op;
5570 ops[0] = ops[i];
5571 ops[i].op = tem;
5572 ops[i].neg = 1;
5575 /* Now make the result by performing the requested operations. */
5576 gen_result:
5577 result = ops[0].op;
5578 for (i = 1; i < n_ops; i++)
5579 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5580 mode, result, ops[i].op);
5582 return result;
5585 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5586 static bool
5587 plus_minus_operand_p (const_rtx x)
5589 return GET_CODE (x) == PLUS
5590 || GET_CODE (x) == MINUS
5591 || (GET_CODE (x) == CONST
5592 && GET_CODE (XEXP (x, 0)) == PLUS
5593 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5594 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5597 /* Like simplify_binary_operation except used for relational operators.
5598 MODE is the mode of the result. If MODE is VOIDmode, the operands must
5599 not both be VOIDmode as well.
5601 CMP_MODE specifies the mode in which the comparison is done, so it is
5602 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5603 the operands or, if both are VOIDmode, the operands are compared in
5604 "infinite precision". */
5606 simplify_context::simplify_relational_operation (rtx_code code,
5607 machine_mode mode,
5608 machine_mode cmp_mode,
5609 rtx op0, rtx op1)
5611 rtx tem, trueop0, trueop1;
5613 if (cmp_mode == VOIDmode)
5614 cmp_mode = GET_MODE (op0);
5615 if (cmp_mode == VOIDmode)
5616 cmp_mode = GET_MODE (op1);
5618 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5619 if (tem)
5620 return relational_result (mode, cmp_mode, tem);
5622 /* For the following tests, ensure const0_rtx is op1. */
5623 if (swap_commutative_operands_p (op0, op1)
5624 || (op0 == const0_rtx && op1 != const0_rtx))
5625 std::swap (op0, op1), code = swap_condition (code);
5627 /* If op0 is a compare, extract the comparison arguments from it. */
5628 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5629 return simplify_gen_relational (code, mode, VOIDmode,
5630 XEXP (op0, 0), XEXP (op0, 1));
5632 if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
5633 return NULL_RTX;
5635 trueop0 = avoid_constant_pool_reference (op0);
5636 trueop1 = avoid_constant_pool_reference (op1);
5637 return simplify_relational_operation_1 (code, mode, cmp_mode,
5638 trueop0, trueop1);
5641 /* This part of simplify_relational_operation is only used when CMP_MODE
5642 is not in class MODE_CC (i.e. it is a real comparison).
5644 MODE is the mode of the result, while CMP_MODE specifies the mode
5645 in which the comparison is done, so it is the mode of the operands. */
5648 simplify_context::simplify_relational_operation_1 (rtx_code code,
5649 machine_mode mode,
5650 machine_mode cmp_mode,
5651 rtx op0, rtx op1)
5653 enum rtx_code op0code = GET_CODE (op0);
5655 if (op1 == const0_rtx && COMPARISON_P (op0))
5657 /* If op0 is a comparison, extract the comparison arguments
5658 from it. */
5659 if (code == NE)
5661 if (GET_MODE (op0) == mode)
5662 return simplify_rtx (op0);
5663 else
5664 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5665 XEXP (op0, 0), XEXP (op0, 1));
5667 else if (code == EQ)
5669 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5670 if (new_code != UNKNOWN)
5671 return simplify_gen_relational (new_code, mode, VOIDmode,
5672 XEXP (op0, 0), XEXP (op0, 1));
5676 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5677 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5678 if ((code == LTU || code == GEU)
5679 && GET_CODE (op0) == PLUS
5680 && CONST_INT_P (XEXP (op0, 1))
5681 && (rtx_equal_p (op1, XEXP (op0, 0))
5682 || rtx_equal_p (op1, XEXP (op0, 1)))
5683 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5684 && XEXP (op0, 1) != const0_rtx)
5686 rtx new_cmp
5687 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5688 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5689 cmp_mode, XEXP (op0, 0), new_cmp);
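/* For illustration: in 8-bit unsigned arithmetic with C == 3, (a + 3) is
   less than 3 precisely when the addition wraps around, i.e. when
   a >= 253 == -3 (mod 256), which is the (GEU a -C) form generated above.  */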
5692 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5693 transformed into (LTU a -C). */
5694 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5695 && CONST_INT_P (XEXP (op0, 1))
5696 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5697 && XEXP (op0, 1) != const0_rtx)
5699 rtx new_cmp
5700 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5701 return simplify_gen_relational (LTU, mode, cmp_mode,
5702 XEXP (op0, 0), new_cmp);
5705 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5706 if ((code == LTU || code == GEU)
5707 && GET_CODE (op0) == PLUS
5708 && rtx_equal_p (op1, XEXP (op0, 1))
5709 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5710 && !rtx_equal_p (op1, XEXP (op0, 0)))
5711 return simplify_gen_relational (code, mode, cmp_mode, op0,
5712 copy_rtx (XEXP (op0, 0)));
5714 if (op1 == const0_rtx)
5716 /* Canonicalize (GTU x 0) as (NE x 0). */
5717 if (code == GTU)
5718 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5719 /* Canonicalize (LEU x 0) as (EQ x 0). */
5720 if (code == LEU)
5721 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5723 else if (op1 == const1_rtx)
5725 switch (code)
5727 case GE:
5728 /* Canonicalize (GE x 1) as (GT x 0). */
5729 return simplify_gen_relational (GT, mode, cmp_mode,
5730 op0, const0_rtx);
5731 case GEU:
5732 /* Canonicalize (GEU x 1) as (NE x 0). */
5733 return simplify_gen_relational (NE, mode, cmp_mode,
5734 op0, const0_rtx);
5735 case LT:
5736 /* Canonicalize (LT x 1) as (LE x 0). */
5737 return simplify_gen_relational (LE, mode, cmp_mode,
5738 op0, const0_rtx);
5739 case LTU:
5740 /* Canonicalize (LTU x 1) as (EQ x 0). */
5741 return simplify_gen_relational (EQ, mode, cmp_mode,
5742 op0, const0_rtx);
5743 default:
5744 break;
5747 else if (op1 == constm1_rtx)
5749 /* Canonicalize (LE x -1) as (LT x 0). */
5750 if (code == LE)
5751 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5752 /* Canonicalize (GT x -1) as (GE x 0). */
5753 if (code == GT)
5754 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5757 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5758 if ((code == EQ || code == NE)
5759 && (op0code == PLUS || op0code == MINUS)
5760 && CONSTANT_P (op1)
5761 && CONSTANT_P (XEXP (op0, 1))
5762 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5764 rtx x = XEXP (op0, 0);
5765 rtx c = XEXP (op0, 1);
5766 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5767 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5769 /* Detect an infinite recursive condition, where we oscillate at this
5770 simplification case between:
5771 A + B == C <---> C - B == A,
5772 where A, B, and C are all constants with non-simplifiable expressions,
5773 usually SYMBOL_REFs. */
5774 if (GET_CODE (tem) == invcode
5775 && CONSTANT_P (x)
5776 && rtx_equal_p (c, XEXP (tem, 1)))
5777 return NULL_RTX;
5779 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
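/* For example, (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)) here, since op1 minus cst1 folds to a constant.  */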
5782 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5783 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5784 scalar_int_mode int_mode, int_cmp_mode;
5785 if (code == NE
5786 && op1 == const0_rtx
5787 && is_int_mode (mode, &int_mode)
5788 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5789 /* ??? Work-around BImode bugs in the ia64 backend. */
5790 && int_mode != BImode
5791 && int_cmp_mode != BImode
5792 && nonzero_bits (op0, int_cmp_mode) == 1
5793 && STORE_FLAG_VALUE == 1)
5794 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5795 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5796 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5798 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5799 if ((code == EQ || code == NE)
5800 && op1 == const0_rtx
5801 && op0code == XOR)
5802 return simplify_gen_relational (code, mode, cmp_mode,
5803 XEXP (op0, 0), XEXP (op0, 1));
5805 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5806 if ((code == EQ || code == NE)
5807 && op0code == XOR
5808 && rtx_equal_p (XEXP (op0, 0), op1)
5809 && !side_effects_p (XEXP (op0, 0)))
5810 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5811 CONST0_RTX (mode));
5813 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5814 if ((code == EQ || code == NE)
5815 && op0code == XOR
5816 && rtx_equal_p (XEXP (op0, 1), op1)
5817 && !side_effects_p (XEXP (op0, 1)))
5818 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5819 CONST0_RTX (mode));
5821 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5822 if ((code == EQ || code == NE)
5823 && op0code == XOR
5824 && CONST_SCALAR_INT_P (op1)
5825 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5826 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5827 simplify_gen_binary (XOR, cmp_mode,
5828 XEXP (op0, 1), op1));
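/* For example, (eq (xor x (const_int 0x0f)) (const_int 0x3c)) becomes
   (eq x (const_int 0x33)), folding the two constants into 0x0f ^ 0x3c.  */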
5830 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5831 constant folding if x/y is a constant. */
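/* For illustration: (eq (and x y) x) asks whether every bit set in X is
   also set in Y, which is equivalent to (X & ~Y) == 0, a form that a
   BICS-style (AND with complement, flag-setting) instruction can test
   directly.  */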
5832 if ((code == EQ || code == NE)
5833 && (op0code == AND || op0code == IOR)
5834 && !side_effects_p (op1)
5835 && op1 != CONST0_RTX (cmp_mode))
5837 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5838 (eq/ne (and (not y) x) 0). */
5839 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5840 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5842 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5843 cmp_mode);
5844 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5846 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5847 CONST0_RTX (cmp_mode));
5850 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5851 (eq/ne (and (not x) y) 0). */
5852 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5853 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5855 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5856 cmp_mode);
5857 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5859 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5860 CONST0_RTX (cmp_mode));
5864 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5865 if ((code == EQ || code == NE)
5866 && GET_CODE (op0) == BSWAP
5867 && CONST_SCALAR_INT_P (op1))
5868 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5869 simplify_gen_unary (BSWAP, cmp_mode,
5870 op1, cmp_mode));
5872 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5873 if ((code == EQ || code == NE)
5874 && GET_CODE (op0) == BSWAP
5875 && GET_CODE (op1) == BSWAP)
5876 return simplify_gen_relational (code, mode, cmp_mode,
5877 XEXP (op0, 0), XEXP (op1, 0));
5879 if (op0code == POPCOUNT && op1 == const0_rtx)
5880 switch (code)
5882 case EQ:
5883 case LE:
5884 case LEU:
5885 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5886 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5887 XEXP (op0, 0), const0_rtx);
5889 case NE:
5890 case GT:
5891 case GTU:
5892 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5893 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5894 XEXP (op0, 0), const0_rtx);
5896 default:
5897 break;
5900 return NULL_RTX;
5903 enum
5905 CMP_EQ = 1,
5906 CMP_LT = 2,
5907 CMP_GT = 4,
5908 CMP_LTU = 8,
5909 CMP_GTU = 16
5913 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5914 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
5915 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5916 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5917 For floating-point comparisons, assume that the operands were ordered. */
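/* For example, constant operands 5 and 3 give KNOWN_RESULT == CMP_GT | CMP_GTU,
   so NE, GE, GT, GEU and GTU map to const_true_rtx while EQ, LT, LE, LTU
   and LEU map to const0_rtx.  */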
5919 static rtx
5920 comparison_result (enum rtx_code code, int known_results)
5922 switch (code)
5924 case EQ:
5925 case UNEQ:
5926 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5927 case NE:
5928 case LTGT:
5929 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5931 case LT:
5932 case UNLT:
5933 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5934 case GE:
5935 case UNGE:
5936 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5938 case GT:
5939 case UNGT:
5940 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5941 case LE:
5942 case UNLE:
5943 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5945 case LTU:
5946 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5947 case GEU:
5948 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5950 case GTU:
5951 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5952 case LEU:
5953 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5955 case ORDERED:
5956 return const_true_rtx;
5957 case UNORDERED:
5958 return const0_rtx;
5959 default:
5960 gcc_unreachable ();
5964 /* Check if the given comparison (done in the given MODE) is actually
5965 a tautology or a contradiction. If the mode is VOIDmode, the
5966 comparison is done in "infinite precision". If no simplification
5967 is possible, this function returns zero. Otherwise, it returns
5968 either const_true_rtx or const0_rtx. */
5971 simplify_const_relational_operation (enum rtx_code code,
5972 machine_mode mode,
5973 rtx op0, rtx op1)
5975 rtx tem;
5976 rtx trueop0;
5977 rtx trueop1;
5979 gcc_assert (mode != VOIDmode
5980 || (GET_MODE (op0) == VOIDmode
5981 && GET_MODE (op1) == VOIDmode));
5983 /* If op0 is a compare, extract the comparison arguments from it. */
5984 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5986 op1 = XEXP (op0, 1);
5987 op0 = XEXP (op0, 0);
5989 if (GET_MODE (op0) != VOIDmode)
5990 mode = GET_MODE (op0);
5991 else if (GET_MODE (op1) != VOIDmode)
5992 mode = GET_MODE (op1);
5993 else
5994 return 0;
5997 /* We can't simplify MODE_CC values since we don't know what the
5998 actual comparison is. */
5999 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6000 return 0;
6002 /* Make sure the constant is second. */
6003 if (swap_commutative_operands_p (op0, op1))
6005 std::swap (op0, op1);
6006 code = swap_condition (code);
6009 trueop0 = avoid_constant_pool_reference (op0);
6010 trueop1 = avoid_constant_pool_reference (op1);
6012 /* For integer comparisons of A and B maybe we can simplify A - B and can
6013 then simplify a comparison of that with zero. If A and B are both either
6014 a register or a CONST_INT, this can't help; testing for these cases will
6015 prevent infinite recursion here and speed things up.
6017 We can only do this for EQ and NE comparisons, as otherwise we may
6018 lose or introduce overflow, which we cannot disregard as undefined because
6019 we do not know the signedness of the operation on either the left or
6020 the right hand side of the comparison. */
6022 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
6023 && (code == EQ || code == NE)
6024 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
6025 && (REG_P (op1) || CONST_INT_P (trueop1)))
6026 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
6027 /* We cannot do this if tem is a nonzero address. */
6028 && ! nonzero_address_p (tem))
6029 return simplify_const_relational_operation (signed_condition (code),
6030 mode, tem, const0_rtx);
6032 if (! HONOR_NANS (mode) && code == ORDERED)
6033 return const_true_rtx;
6035 if (! HONOR_NANS (mode) && code == UNORDERED)
6036 return const0_rtx;
6038 /* For modes without NaNs, if the two operands are equal, we know the
6039 result except if they have side-effects. Even with NaNs we know
6040 the result of unordered comparisons and, if signaling NaNs are
6041 irrelevant, also the result of LT/GT/LTGT. */
6042 if ((! HONOR_NANS (trueop0)
6043 || code == UNEQ || code == UNLE || code == UNGE
6044 || ((code == LT || code == GT || code == LTGT)
6045 && ! HONOR_SNANS (trueop0)))
6046 && rtx_equal_p (trueop0, trueop1)
6047 && ! side_effects_p (trueop0))
6048 return comparison_result (code, CMP_EQ);
6050 /* If the operands are floating-point constants, see if we can fold
6051 the result. */
6052 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
6053 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
6054 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
6056 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
6057 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
6059 /* Comparisons are unordered iff at least one of the values is NaN. */
6060 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
6061 switch (code)
6063 case UNEQ:
6064 case UNLT:
6065 case UNGT:
6066 case UNLE:
6067 case UNGE:
6068 case NE:
6069 case UNORDERED:
6070 return const_true_rtx;
6071 case EQ:
6072 case LT:
6073 case GT:
6074 case LE:
6075 case GE:
6076 case LTGT:
6077 case ORDERED:
6078 return const0_rtx;
6079 default:
6080 return 0;
6083 return comparison_result (code,
6084 (real_equal (d0, d1) ? CMP_EQ :
6085 real_less (d0, d1) ? CMP_LT : CMP_GT));
6088 /* Otherwise, see if the operands are both integers. */
6089 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
6090 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
6092 /* It would be nice if we really had a mode here. However, the
6093 largest int representable on the target is as good as
6094 infinite. */
6095 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
6096 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
6097 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
6099 if (wi::eq_p (ptrueop0, ptrueop1))
6100 return comparison_result (code, CMP_EQ);
6101 else
6103 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
6104 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
6105 return comparison_result (code, cr);
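/* For example, comparing the constants -1 and 1 gives CMP_LT | CMP_GTU:
   signed LT holds, while unsigned LTU does not, because -1 acts as the
   maximum unsigned value.  */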
6109 /* Optimize comparisons with upper and lower bounds. */
6110 scalar_int_mode int_mode;
6111 if (CONST_INT_P (trueop1)
6112 && is_a <scalar_int_mode> (mode, &int_mode)
6113 && HWI_COMPUTABLE_MODE_P (int_mode)
6114 && !side_effects_p (trueop0))
6116 int sign;
6117 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
6118 HOST_WIDE_INT val = INTVAL (trueop1);
6119 HOST_WIDE_INT mmin, mmax;
6121 if (code == GEU
6122 || code == LEU
6123 || code == GTU
6124 || code == LTU)
6125 sign = 0;
6126 else
6127 sign = 1;
6129 /* Get a reduced range if the sign bit is zero. */
6130 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
6132 mmin = 0;
6133 mmax = nonzero;
6135 else
6137 rtx mmin_rtx, mmax_rtx;
6138 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
6140 mmin = INTVAL (mmin_rtx);
6141 mmax = INTVAL (mmax_rtx);
6142 if (sign)
6144 unsigned int sign_copies
6145 = num_sign_bit_copies (trueop0, int_mode);
6147 mmin >>= (sign_copies - 1);
6148 mmax >>= (sign_copies - 1);
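/* For illustration: in SImode, 24 sign-bit copies mean the value fits in
   9 significant bits, so the bounds shrink from [-2147483648, 2147483647]
   to [-256, 255].  */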
6152 switch (code)
6154 /* x >= y is always true for y <= mmin, always false for y > mmax. */
6155 case GEU:
6156 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6157 return const_true_rtx;
6158 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6159 return const0_rtx;
6160 break;
6161 case GE:
6162 if (val <= mmin)
6163 return const_true_rtx;
6164 if (val > mmax)
6165 return const0_rtx;
6166 break;
6168 /* x <= y is always true for y >= mmax, always false for y < mmin. */
6169 case LEU:
6170 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6171 return const_true_rtx;
6172 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6173 return const0_rtx;
6174 break;
6175 case LE:
6176 if (val >= mmax)
6177 return const_true_rtx;
6178 if (val < mmin)
6179 return const0_rtx;
6180 break;
6182 case EQ:
6183 /* x == y is always false for y out of range. */
6184 if (val < mmin || val > mmax)
6185 return const0_rtx;
6186 break;
6188 /* x > y is always false for y >= mmax, always true for y < mmin. */
6189 case GTU:
6190 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6191 return const0_rtx;
6192 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6193 return const_true_rtx;
6194 break;
6195 case GT:
6196 if (val >= mmax)
6197 return const0_rtx;
6198 if (val < mmin)
6199 return const_true_rtx;
6200 break;
6202 /* x < y is always false for y <= mmin, always true for y > mmax. */
6203 case LTU:
6204 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6205 return const0_rtx;
6206 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6207 return const_true_rtx;
6208 break;
6209 case LT:
6210 if (val <= mmin)
6211 return const0_rtx;
6212 if (val > mmax)
6213 return const_true_rtx;
6214 break;
6216 case NE:
6217 /* x != y is always true for y out of range. */
6218 if (val < mmin || val > mmax)
6219 return const_true_rtx;
6220 break;
6222 default:
6223 break;
6227 /* Optimize integer comparisons with zero. */
6228 if (is_a <scalar_int_mode> (mode, &int_mode)
6229 && trueop1 == const0_rtx
6230 && !side_effects_p (trueop0))
6232 /* Some addresses are known to be nonzero. We don't know
6233 their sign, but equality comparisons are known. */
6234 if (nonzero_address_p (trueop0))
6236 if (code == EQ || code == LEU)
6237 return const0_rtx;
6238 if (code == NE || code == GTU)
6239 return const_true_rtx;
6242 /* See if the first operand is an IOR with a constant. If so, we
6243 may be able to determine the result of this comparison. */
6244 if (GET_CODE (op0) == IOR)
6246 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
6247 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
6249 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
6250 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
6251 && (UINTVAL (inner_const)
6252 & (HOST_WIDE_INT_1U
6253 << sign_bitnum)));
6255 switch (code)
6257 case EQ:
6258 case LEU:
6259 return const0_rtx;
6260 case NE:
6261 case GTU:
6262 return const_true_rtx;
6263 case LT:
6264 case LE:
6265 if (has_sign)
6266 return const_true_rtx;
6267 break;
6268 case GT:
6269 case GE:
6270 if (has_sign)
6271 return const0_rtx;
6272 break;
6273 default:
6274 break;
6280 /* Optimize comparison of ABS with zero. */
6281 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
6282 && (GET_CODE (trueop0) == ABS
6283 || (GET_CODE (trueop0) == FLOAT_EXTEND
6284 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
6286 switch (code)
6288 case LT:
6289 /* Optimize abs(x) < 0.0. */
6290 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
6291 return const0_rtx;
6292 break;
6294 case GE:
6295 /* Optimize abs(x) >= 0.0. */
6296 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
6297 return const_true_rtx;
6298 break;
6300 case UNGE:
6301 /* Optimize ! (abs(x) < 0.0). */
6302 return const_true_rtx;
6304 default:
6305 break;
6309 return 0;
6312 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6313 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6314 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
6315 can be simplified to that, or NULL_RTX if not.
6316 Assume X is compared against zero with CMP_CODE and the true
6317 arm is TRUE_VAL and the false arm is FALSE_VAL. */
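/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, (x == 0 ? 32 : (clz:SI x)) simplifies to plain (clz:SI x).  */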
6320 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
6321 rtx true_val, rtx false_val)
6323 if (cmp_code != EQ && cmp_code != NE)
6324 return NULL_RTX;
6326 /* Result on X == 0 and X != 0 respectively. */
6327 rtx on_zero, on_nonzero;
6328 if (cmp_code == EQ)
6330 on_zero = true_val;
6331 on_nonzero = false_val;
6333 else
6335 on_zero = false_val;
6336 on_nonzero = true_val;
6339 rtx_code op_code = GET_CODE (on_nonzero);
6340 if ((op_code != CLZ && op_code != CTZ)
6341 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
6342 || !CONST_INT_P (on_zero))
6343 return NULL_RTX;
6345 HOST_WIDE_INT op_val;
6346 scalar_int_mode mode ATTRIBUTE_UNUSED
6347 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
6348 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
6349 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
6350 && op_val == INTVAL (on_zero))
6351 return on_nonzero;
6353 return NULL_RTX;
6356 /* Try to simplify X given that it appears within operand OP of a
6357 VEC_MERGE operation whose mask is MASK. X need not use the same
6358 vector mode as the VEC_MERGE, but it must have the same number of
6359 elements.
6361 Return the simplified X on success, otherwise return NULL_RTX. */
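/* For example, if X is (plus:V4SI (vec_merge:V4SI a b m) c) and MASK is the
   same m, then for OP == 0 the inner vec_merge contributes only its first
   operand, so X simplifies to (plus:V4SI a c), assuming b has no side
   effects.  */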
6364 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
6366 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
6367 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
6368 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
6370 if (side_effects_p (XEXP (x, 1 - op)))
6371 return NULL_RTX;
6373 return XEXP (x, op);
6375 if (UNARY_P (x)
6376 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6377 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6379 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6380 if (top0)
6381 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6382 GET_MODE (XEXP (x, 0)));
6384 if (BINARY_P (x)
6385 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6386 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6387 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6388 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6390 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6391 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6392 if (top0 || top1)
6394 if (COMPARISON_P (x))
6395 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6396 GET_MODE (XEXP (x, 0)) != VOIDmode
6397 ? GET_MODE (XEXP (x, 0))
6398 : GET_MODE (XEXP (x, 1)),
6399 top0 ? top0 : XEXP (x, 0),
6400 top1 ? top1 : XEXP (x, 1));
6401 else
6402 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6403 top0 ? top0 : XEXP (x, 0),
6404 top1 ? top1 : XEXP (x, 1));
6407 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6408 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6409 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6410 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6411 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6412 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6413 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6415 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6416 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6417 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6418 if (top0 || top1 || top2)
6419 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6420 GET_MODE (XEXP (x, 0)),
6421 top0 ? top0 : XEXP (x, 0),
6422 top1 ? top1 : XEXP (x, 1),
6423 top2 ? top2 : XEXP (x, 2));
6425 return NULL_RTX;
6429 /* Simplify CODE, an operation with result mode MODE and three operands,
6430 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6431 a constant. Return 0 if no simplification is possible. */
6434 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
6435 machine_mode op0_mode,
6436 rtx op0, rtx op1, rtx op2)
6438 bool any_change = false;
6439 rtx tem, trueop2;
6440 scalar_int_mode int_mode, int_op0_mode;
6441 unsigned int n_elts;
6443 switch (code)
6445 case FMA:
6446 /* Simplify negations around the multiplication. */
6447 /* -a * -b + c => a * b + c. */
6448 if (GET_CODE (op0) == NEG)
6450 tem = simplify_unary_operation (NEG, mode, op1, mode);
6451 if (tem)
6452 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6454 else if (GET_CODE (op1) == NEG)
6456 tem = simplify_unary_operation (NEG, mode, op0, mode);
6457 if (tem)
6458 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6461 /* Canonicalize the two multiplication operands. */
6462 /* a * -b + c => -b * a + c. */
6463 if (swap_commutative_operands_p (op0, op1))
6464 std::swap (op0, op1), any_change = true;
6466 if (any_change)
6467 return gen_rtx_FMA (mode, op0, op1, op2);
6468 return NULL_RTX;
6470 case SIGN_EXTRACT:
6471 case ZERO_EXTRACT:
6472 if (CONST_INT_P (op0)
6473 && CONST_INT_P (op1)
6474 && CONST_INT_P (op2)
6475 && is_a <scalar_int_mode> (mode, &int_mode)
6476 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6477 && HWI_COMPUTABLE_MODE_P (int_mode))
6479 /* Extracting a bit-field from a constant. */
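/* For example, with !BITS_BIG_ENDIAN,
   (zero_extract (const_int 0x1234) (const_int 4) (const_int 4))
   shifts right by the bit position 4 and masks to 4 bits,
   yielding (const_int 3).  */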
6480 unsigned HOST_WIDE_INT val = UINTVAL (op0);
6481 HOST_WIDE_INT op1val = INTVAL (op1);
6482 HOST_WIDE_INT op2val = INTVAL (op2);
6483 if (!BITS_BIG_ENDIAN)
6484 val >>= op2val;
6485 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6486 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6487 else
6488 /* Not enough information to calculate the bit position. */
6489 break;
6491 if (HOST_BITS_PER_WIDE_INT != op1val)
6493 /* First zero-extend. */
6494 val &= (HOST_WIDE_INT_1U << op1val) - 1;
6495 /* If desired, propagate sign bit. */
6496 if (code == SIGN_EXTRACT
6497 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6498 != 0)
6499 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6502 return gen_int_mode (val, int_mode);
6504 break;
6506 case IF_THEN_ELSE:
6507 if (CONST_INT_P (op0))
6508 return op0 != const0_rtx ? op1 : op2;
6510 /* Convert c ? a : a into "a". */
6511 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6512 return op1;
6514 /* Convert a != b ? a : b into "a". */
6515 if (GET_CODE (op0) == NE
6516 && ! side_effects_p (op0)
6517 && ! HONOR_NANS (mode)
6518 && ! HONOR_SIGNED_ZEROS (mode)
6519 && ((rtx_equal_p (XEXP (op0, 0), op1)
6520 && rtx_equal_p (XEXP (op0, 1), op2))
6521 || (rtx_equal_p (XEXP (op0, 0), op2)
6522 && rtx_equal_p (XEXP (op0, 1), op1))))
6523 return op1;
6525 /* Convert a == b ? a : b into "b". */
6526 if (GET_CODE (op0) == EQ
6527 && ! side_effects_p (op0)
6528 && ! HONOR_NANS (mode)
6529 && ! HONOR_SIGNED_ZEROS (mode)
6530 && ((rtx_equal_p (XEXP (op0, 0), op1)
6531 && rtx_equal_p (XEXP (op0, 1), op2))
6532 || (rtx_equal_p (XEXP (op0, 0), op2)
6533 && rtx_equal_p (XEXP (op0, 1), op1))))
6534 return op2;
6536 /* Convert (!c) != {0,...,0} ? a : b into
6537 c != {0,...,0} ? b : a for vector modes. */
6538 if (VECTOR_MODE_P (GET_MODE (op1))
6539 && GET_CODE (op0) == NE
6540 && GET_CODE (XEXP (op0, 0)) == NOT
6541 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6543 rtx cv = XEXP (op0, 1);
6544 int nunits;
6545 bool ok = true;
6546 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6547 ok = false;
6548 else
6549 for (int i = 0; i < nunits; ++i)
6550 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6552 ok = false;
6553 break;
6555 if (ok)
6557 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6558 XEXP (XEXP (op0, 0), 0),
6559 XEXP (op0, 1));
6560 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6561 return retval;
6565 /* Convert x == 0 ? N : clz (x) into clz (x) when
6566 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6567 Similarly for ctz (x). */
6568 if (COMPARISON_P (op0) && !side_effects_p (op0)
6569 && XEXP (op0, 1) == const0_rtx)
6571 rtx simplified
6572 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6573 op1, op2);
6574 if (simplified)
6575 return simplified;
6578 if (COMPARISON_P (op0) && ! side_effects_p (op0))
6580 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6581 ? GET_MODE (XEXP (op0, 1))
6582 : GET_MODE (XEXP (op0, 0)));
6583 rtx temp;
6585 /* Look for happy constants in op1 and op2. */
6586 if (CONST_INT_P (op1) && CONST_INT_P (op2))
6588 HOST_WIDE_INT t = INTVAL (op1);
6589 HOST_WIDE_INT f = INTVAL (op2);
6591 if (t == STORE_FLAG_VALUE && f == 0)
6592 code = GET_CODE (op0);
6593 else if (t == 0 && f == STORE_FLAG_VALUE)
6595 enum rtx_code tmp;
6596 tmp = reversed_comparison_code (op0, NULL);
6597 if (tmp == UNKNOWN)
6598 break;
6599 code = tmp;
6601 else
6602 break;
6604 return simplify_gen_relational (code, mode, cmp_mode,
6605 XEXP (op0, 0), XEXP (op0, 1));
6608 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6609 cmp_mode, XEXP (op0, 0),
6610 XEXP (op0, 1));
6612 /* See if any simplifications were possible. */
6613 if (temp)
6615 if (CONST_INT_P (temp))
6616 return temp == const0_rtx ? op2 : op1;
6617 else if (temp)
6618 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6621 break;
6623 case VEC_MERGE:
6624 gcc_assert (GET_MODE (op0) == mode);
6625 gcc_assert (GET_MODE (op1) == mode);
6626 gcc_assert (VECTOR_MODE_P (mode));
6627 trueop2 = avoid_constant_pool_reference (op2);
6628 if (CONST_INT_P (trueop2)
6629 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6631 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6632 unsigned HOST_WIDE_INT mask;
6633 if (n_elts == HOST_BITS_PER_WIDE_INT)
6634 mask = -1;
6635 else
6636 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6638 if (!(sel & mask) && !side_effects_p (op0))
6639 return op1;
6640 if ((sel & mask) == mask && !side_effects_p (op1))
6641 return op0;
6643 rtx trueop0 = avoid_constant_pool_reference (op0);
6644 rtx trueop1 = avoid_constant_pool_reference (op1);
6645 if (GET_CODE (trueop0) == CONST_VECTOR
6646 && GET_CODE (trueop1) == CONST_VECTOR)
6648 rtvec v = rtvec_alloc (n_elts);
6649 unsigned int i;
6651 for (i = 0; i < n_elts; i++)
6652 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6653 ? CONST_VECTOR_ELT (trueop0, i)
6654 : CONST_VECTOR_ELT (trueop1, i));
6655 return gen_rtx_CONST_VECTOR (mode, v);
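/* For example, merging V4SI constants {0, 1, 2, 3} and {4, 5, 6, 7} with
   sel == 0b0101 takes elements 0 and 2 from op0 and elements 1 and 3 from
   op1, producing {0, 5, 2, 7}.  */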
6658 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6659 if no element from a appears in the result. */
6660 if (GET_CODE (op0) == VEC_MERGE)
6662 tem = avoid_constant_pool_reference (XEXP (op0, 2));
6663 if (CONST_INT_P (tem))
6665 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6666 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6667 return simplify_gen_ternary (code, mode, mode,
6668 XEXP (op0, 1), op1, op2);
6669 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6670 return simplify_gen_ternary (code, mode, mode,
6671 XEXP (op0, 0), op1, op2);
6674 if (GET_CODE (op1) == VEC_MERGE)
6676 tem = avoid_constant_pool_reference (XEXP (op1, 2));
6677 if (CONST_INT_P (tem))
6679 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6680 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6681 return simplify_gen_ternary (code, mode, mode,
6682 op0, XEXP (op1, 1), op2);
6683 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6684 return simplify_gen_ternary (code, mode, mode,
6685 op0, XEXP (op1, 0), op2);
6689 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6690 with a. */
6691 if (GET_CODE (op0) == VEC_DUPLICATE
6692 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6693 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6694 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6696 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6697 if (CONST_INT_P (tem) && CONST_INT_P (op2))
6699 if (XEXP (XEXP (op0, 0), 0) == op1
6700 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6701 return op1;
6704 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6705 (const_int N))
6706 with (vec_concat (X) (B)) if N == 1 or
6707 (vec_concat (A) (X)) if N == 2. */
6708 if (GET_CODE (op0) == VEC_DUPLICATE
6709 && GET_CODE (op1) == CONST_VECTOR
6710 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6711 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6712 && IN_RANGE (sel, 1, 2))
6714 rtx newop0 = XEXP (op0, 0);
6715 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6716 if (sel == 2)
6717 std::swap (newop0, newop1);
6718 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6720 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6721 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6722 Only applies for vectors of two elements. */
6723 if (GET_CODE (op0) == VEC_DUPLICATE
6724 && GET_CODE (op1) == VEC_CONCAT
6725 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6726 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6727 && IN_RANGE (sel, 1, 2))
6729 rtx newop0 = XEXP (op0, 0);
6730 rtx newop1 = XEXP (op1, 2 - sel);
6731 rtx otherop = XEXP (op1, sel - 1);
6732 if (sel == 2)
6733 std::swap (newop0, newop1);
6734 /* Don't want to throw away the other part of the vec_concat if
6735 it has side-effects. */
6736 if (!side_effects_p (otherop))
6737 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6740 /* Replace:
6742 (vec_merge:outer (vec_duplicate:outer x:inner)
6743 (subreg:outer y:inner 0)
6744 (const_int N))
6746 with (vec_concat:outer x:inner y:inner) if N == 1,
6747 or (vec_concat:outer y:inner x:inner) if N == 2.
6749 Implicitly, this means we have a paradoxical subreg, but such
6750 a check is cheap, so make it anyway.
6752 Only applies for vectors of two elements. */
6753 if (GET_CODE (op0) == VEC_DUPLICATE
6754 && GET_CODE (op1) == SUBREG
6755 && GET_MODE (op1) == GET_MODE (op0)
6756 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6757 && paradoxical_subreg_p (op1)
6758 && subreg_lowpart_p (op1)
6759 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6760 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6761 && IN_RANGE (sel, 1, 2))
6763 rtx newop0 = XEXP (op0, 0);
6764 rtx newop1 = SUBREG_REG (op1);
6765 if (sel == 2)
6766 std::swap (newop0, newop1);
6767 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6770 /* Same as above but with switched operands:
6771 Replace (vec_merge:outer (subreg:outer x:inner 0)
6772 (vec_duplicate:outer y:inner)
6773 (const_int N))
6775 with (vec_concat:outer x:inner y:inner) if N == 1,
6776 or (vec_concat:outer y:inner x:inner) if N == 2. */
6777 if (GET_CODE (op1) == VEC_DUPLICATE
6778 && GET_CODE (op0) == SUBREG
6779 && GET_MODE (op0) == GET_MODE (op1)
6780 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6781 && paradoxical_subreg_p (op0)
6782 && subreg_lowpart_p (op0)
6783 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6784 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6785 && IN_RANGE (sel, 1, 2))
6787 rtx newop0 = SUBREG_REG (op0);
6788 rtx newop1 = XEXP (op1, 0);
6789 if (sel == 2)
6790 std::swap (newop0, newop1);
6791 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6794 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6795 (const_int n))
6796 with (vec_concat x y) or (vec_concat y x) depending on value
6797 of N. */
6798 if (GET_CODE (op0) == VEC_DUPLICATE
6799 && GET_CODE (op1) == VEC_DUPLICATE
6800 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6801 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6802 && IN_RANGE (sel, 1, 2))
6804 rtx newop0 = XEXP (op0, 0);
6805 rtx newop1 = XEXP (op1, 0);
6806 if (sel == 2)
6807 std::swap (newop0, newop1);
6809 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6813 if (rtx_equal_p (op0, op1)
6814 && !side_effects_p (op2) && !side_effects_p (op1))
6815 return op0;
6817 if (!side_effects_p (op2))
6819 rtx top0
6820 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6821 rtx top1
6822 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6823 if (top0 || top1)
6824 return simplify_gen_ternary (code, mode, mode,
6825 top0 ? top0 : op0,
6826 top1 ? top1 : op1, op2);
6829 break;
6831 default:
6832 gcc_unreachable ();
6835 return 0;
6838 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6839 starting at byte FIRST_BYTE. Return true on success and add the
6840 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6841 that the bytes follow target memory order. Leave BYTES unmodified
6842 on failure.
6844 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6845 BYTES before calling this function. */
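/* For example, encoding (const_int 0x0102) in HImode pushes the byte 0x02
   and then 0x01 on a little-endian target, or 0x01 and then 0x02 on a
   big-endian one, matching the target memory image.  */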
6847 bool
6848 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6849 unsigned int first_byte, unsigned int num_bytes)
6851 /* Check the mode is sensible. */
6852 gcc_assert (GET_MODE (x) == VOIDmode
6853 ? is_a <scalar_int_mode> (mode)
6854 : mode == GET_MODE (x));
6856 if (GET_CODE (x) == CONST_VECTOR)
6858 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6859 is necessary. The only complication is that MODE_VECTOR_BOOL
6860 vectors can have several elements per byte. */
6861 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6862 GET_MODE_NUNITS (mode));
6863 unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6864 if (elt_bits < BITS_PER_UNIT)
6866 /* This is the only case in which elements can be smaller than
6867 a byte. */
6868 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6869 for (unsigned int i = 0; i < num_bytes; ++i)
6871 target_unit value = 0;
6872 for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6874 value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6875 elt += 1;
6877 bytes.quick_push (value);
6879 return true;
6882 unsigned int start = bytes.length ();
6883 unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6884 /* Make FIRST_BYTE relative to ELT. */
6885 first_byte %= elt_bytes;
6886 while (num_bytes > 0)
6888 /* Work out how many bytes we want from element ELT. */
6889 unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6890 if (!native_encode_rtx (GET_MODE_INNER (mode),
6891 CONST_VECTOR_ELT (x, elt), bytes,
6892 first_byte, chunk_bytes))
6894 bytes.truncate (start);
6895 return false;
6897 elt += 1;
6898 first_byte = 0;
6899 num_bytes -= chunk_bytes;
6901 return true;
6904 /* All subsequent cases are limited to scalars. */
6905 scalar_mode smode;
6906 if (!is_a <scalar_mode> (mode, &smode))
6907 return false;
6909 /* Make sure that the region is in range. */
6910 unsigned int end_byte = first_byte + num_bytes;
6911 unsigned int mode_bytes = GET_MODE_SIZE (smode);
6912 gcc_assert (end_byte <= mode_bytes);
6914 if (CONST_SCALAR_INT_P (x))
6916 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6917 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6918 position of each byte. */
6919 rtx_mode_t value (x, smode);
6920 wide_int_ref value_wi (value);
6921 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6923 /* Always constant because the inputs are. */
6924 unsigned int lsb
6925 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6926 /* Operate directly on the encoding rather than using
6927 wi::extract_uhwi, so that we preserve the sign or zero
6928 extension for modes that are not a whole number of bits in
6929 size. (Zero extension is only used for the combination of
6930 innermode == BImode && STORE_FLAG_VALUE == 1). */
6931 unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6932 unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6933 unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6934 bytes.quick_push (uhwi >> shift);
6936 return true;
6939 if (CONST_DOUBLE_P (x))
6941 /* real_to_target produces an array of integers in target memory order.
6942 All integers before the last one have 32 bits; the last one may
6943 have 32 bits or fewer, depending on whether the mode bitsize
6944 is divisible by 32. Each of these integers is then laid out
6945 in target memory as any other integer would be. */
6946 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6947 real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6949 /* The (maximum) number of target bytes per element of el32. */
6950 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6951 gcc_assert (bytes_per_el32 != 0);
6953 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6954 handling above. */
6955 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6957 unsigned int index = byte / bytes_per_el32;
6958 unsigned int subbyte = byte % bytes_per_el32;
6959 unsigned int int_bytes = MIN (bytes_per_el32,
6960 mode_bytes - index * bytes_per_el32);
6961 /* Always constant because the inputs are. */
6962 unsigned int lsb
6963 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6964 bytes.quick_push ((unsigned long) el32[index] >> lsb);
6966 return true;
6969 if (GET_CODE (x) == CONST_FIXED)
6971 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6973 /* Always constant because the inputs are. */
6974 unsigned int lsb
6975 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6976 unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6977 if (lsb >= HOST_BITS_PER_WIDE_INT)
6979 lsb -= HOST_BITS_PER_WIDE_INT;
6980 piece = CONST_FIXED_VALUE_HIGH (x);
6982 bytes.quick_push (piece >> lsb);
6984 return true;
6987 return false;
6990 /* Read a vector of mode MODE from the target memory image given by BYTES,
6991 starting at byte FIRST_BYTE. The vector is known to be encodable using
6992 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6993 and BYTES is known to have enough bytes to supply NPATTERNS *
6994 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6995 BITS_PER_UNIT bits and the bytes are in target memory order.
6997 Return the vector on success, otherwise return NULL_RTX. */
7000 native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
7001 unsigned int first_byte, unsigned int npatterns,
7002 unsigned int nelts_per_pattern)
7004 rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
7006 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7007 GET_MODE_NUNITS (mode));
7008 if (elt_bits < BITS_PER_UNIT)
7010 /* This is the only case in which elements can be smaller than a byte.
7011 Element 0 is always in the lsb of the containing byte. */
7012 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
7013 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7015 unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
7016 unsigned int byte_index = bit_index / BITS_PER_UNIT;
7017 unsigned int lsb = bit_index % BITS_PER_UNIT;
7018 builder.quick_push (bytes[byte_index] & (1 << lsb)
7019 ? CONST1_RTX (BImode)
7020 : CONST0_RTX (BImode));
7023 else
7025 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7027 rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
7028 if (!x)
7029 return NULL_RTX;
7030 builder.quick_push (x);
7031 first_byte += elt_bits / BITS_PER_UNIT;
7034 return builder.build ();
7037 /* Read an rtx of mode MODE from the target memory image given by BYTES,
7038 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
7039 bits and the bytes are in target memory order. The image has enough
7040 values to specify all bytes of MODE.
7042 Return the rtx on success, otherwise return NULL_RTX. */
7045 native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
7046 unsigned int first_byte)
7048 if (VECTOR_MODE_P (mode))
7050 /* If we know at compile time how many elements there are,
7051 pull each element directly from BYTES. */
7052 unsigned int nelts;
7053 if (GET_MODE_NUNITS (mode).is_constant (&nelts))
7054 return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
7055 return NULL_RTX;
7058 scalar_int_mode imode;
7059 if (is_a <scalar_int_mode> (mode, &imode)
7060 && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
7062 /* Pull the bytes msb first, so that we can use simple
7063 shift-and-insert wide_int operations. */
7064 unsigned int size = GET_MODE_SIZE (imode);
7065 wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
7066 for (unsigned int i = 0; i < size; ++i)
7068 unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
7069 /* Always constant because the inputs are. */
7070 unsigned int subbyte
7071 = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
7072 result <<= BITS_PER_UNIT;
7073 result |= bytes[first_byte + subbyte];
7075 return immed_wide_int_const (result, imode);
7078 scalar_float_mode fmode;
7079 if (is_a <scalar_float_mode> (mode, &fmode))
7081 /* We need to build an array of integers in target memory order.
7082 All integers before the last one have 32 bits; the last one may
7083 have 32 bits or fewer, depending on whether the mode bitsize
7084 is divisible by 32. */
7085 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
7086 unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
7087 memset (el32, 0, num_el32 * sizeof (long));
7089 /* The (maximum) number of target bytes per element of el32. */
7090 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
7091 gcc_assert (bytes_per_el32 != 0);
7093 unsigned int mode_bytes = GET_MODE_SIZE (fmode);
7094 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7096 unsigned int index = byte / bytes_per_el32;
7097 unsigned int subbyte = byte % bytes_per_el32;
7098 unsigned int int_bytes = MIN (bytes_per_el32,
7099 mode_bytes - index * bytes_per_el32);
7100 /* Always constant because the inputs are. */
7101 unsigned int lsb
7102 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
7103 el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
7105 REAL_VALUE_TYPE r;
7106 real_from_target (&r, el32, fmode);
7107 return const_double_from_real_value (r, fmode);
7110 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
7112 scalar_mode smode = as_a <scalar_mode> (mode);
7113 FIXED_VALUE_TYPE f;
7114 f.data.low = 0;
7115 f.data.high = 0;
7116 f.mode = smode;
7118 unsigned int mode_bytes = GET_MODE_SIZE (smode);
7119 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7121 /* Always constant because the inputs are. */
7122 unsigned int lsb
7123 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7124 unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
7125 if (lsb >= HOST_BITS_PER_WIDE_INT)
7126 f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
7127 else
7128 f.data.low |= unit << lsb;
7130 return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
7133 return NULL_RTX;
7136 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
7137 is to convert a runtime BYTE value into a constant one. */
7139 static poly_uint64
7140 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
7142 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7143 machine_mode mode = GET_MODE (x);
7144 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7145 GET_MODE_NUNITS (mode));
7146 /* The number of bits needed to encode one element from each pattern. */
7147 unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
7149 /* Identify the start point in terms of a sequence number and a byte offset
7150 within that sequence. */
7151 poly_uint64 first_sequence;
7152 unsigned HOST_WIDE_INT subbit;
7153 if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
7154 &first_sequence, &subbit))
7156 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7157 if (nelts_per_pattern == 1)
7158 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7159 doesn't matter. */
7160 byte = subbit / BITS_PER_UNIT;
7161 else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
7163 /* The subreg drops the first element from each pattern and
7164 only uses the second element. Find the first sequence
7165 that starts on a byte boundary. */
7166 subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
7167 byte = subbit / BITS_PER_UNIT;
7170 return byte;
7173 /* Subroutine of simplify_subreg in which:
7175 - X is known to be a CONST_VECTOR
7176 - OUTERMODE is known to be a vector mode
7178 Try to handle the subreg by operating on the CONST_VECTOR encoding
7179 rather than on each individual element of the CONST_VECTOR.
7181 Return the simplified subreg on success, otherwise return NULL_RTX. */
7183 static rtx
7184 simplify_const_vector_subreg (machine_mode outermode, rtx x,
7185 machine_mode innermode, unsigned int first_byte)
7187 /* Paradoxical subregs of vectors have dubious semantics. */
7188 if (paradoxical_subreg_p (outermode, innermode))
7189 return NULL_RTX;
7191 /* We can only preserve the semantics of a stepped pattern if the new
7192 vector element is the same as the original one. */
7193 if (CONST_VECTOR_STEPPED_P (x)
7194 && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
7195 return NULL_RTX;
7197 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7198 unsigned int x_elt_bits
7199 = vector_element_size (GET_MODE_BITSIZE (innermode),
7200 GET_MODE_NUNITS (innermode));
7201 unsigned int out_elt_bits
7202 = vector_element_size (GET_MODE_BITSIZE (outermode),
7203 GET_MODE_NUNITS (outermode));
7205 /* The number of bits needed to encode one element from every pattern
7206 of the original vector. */
7207 unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
7209 /* The number of bits needed to encode one element from every pattern
7210 of the result. */
7211 unsigned int out_sequence_bits
7212 = least_common_multiple (x_sequence_bits, out_elt_bits);
7214 /* Work out the number of interleaved patterns in the output vector
7215 and the number of encoded elements per pattern. */
7216 unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
7217 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7219 /* The encoding scheme requires the number of elements to be a multiple
7220 of the number of patterns, so that each pattern appears at least once
7221 and so that the same number of elements appear from each pattern. */
7222 bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
7223 unsigned int const_nunits;
7224 if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
7225 && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
7227 /* Either the encoding is invalid, or applying it would give us
7228 more elements than we need. Just encode each element directly. */
7229 out_npatterns = const_nunits;
7230 nelts_per_pattern = 1;
7232 else if (!ok_p)
7233 return NULL_RTX;
7235 /* Get enough bytes of X to form the new encoding. */
7236 unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
7237 unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
7238 auto_vec<target_unit, 128> buffer (buffer_bytes);
7239 if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
7240 return NULL_RTX;
7242 /* Reencode the bytes as OUTERMODE. */
7243 return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
7244 nelts_per_pattern);
7247 /* Try to simplify a subreg of a constant by encoding the subreg region
7248 as a sequence of target bytes and reading them back in the new mode.
7249 Return the new value on success, otherwise return null.
7251 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7252 and byte offset FIRST_BYTE. */
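/* For example, a (subreg:SI (const_double:DF X) 0) is resolved by encoding
   the DFmode image of X into target bytes and decoding the selected bytes
   back as an SImode integer.  */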
7254 static rtx
7255 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
7256 machine_mode innermode, unsigned int first_byte)
7258 unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
7259 auto_vec<target_unit, 128> buffer (buffer_bytes);
7261 /* Some ports misuse CCmode. */
7262 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
7263 return x;
7265 /* Paradoxical subregs read undefined values for bytes outside of the
7266 inner value. However, we have traditionally always sign-extended
7267 integer constants and zero-extended others. */
7268 unsigned int inner_bytes = buffer_bytes;
7269 if (paradoxical_subreg_p (outermode, innermode))
7271 if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
7272 return NULL_RTX;
7274 target_unit filler = 0;
7275 if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
7276 filler = -1;
7278 /* Add any leading bytes due to big-endian layout. The number of
7279 bytes must be constant because both modes have constant size. */
7280 unsigned int leading_bytes
7281 = -byte_lowpart_offset (outermode, innermode).to_constant ();
7282 for (unsigned int i = 0; i < leading_bytes; ++i)
7283 buffer.quick_push (filler);
7285 if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7286 return NULL_RTX;
7288 /* Add any trailing bytes due to little-endian layout. */
7289 while (buffer.length () < buffer_bytes)
7290 buffer.quick_push (filler);
7292 else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7293 return NULL_RTX;
7294 rtx ret = native_decode_rtx (outermode, buffer, 0);
7295 if (ret && MODE_COMPOSITE_P (outermode))
7297 auto_vec<target_unit, 128> buffer2 (buffer_bytes);
7298 if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
7299 return NULL_RTX;
7300 for (unsigned int i = 0; i < buffer_bytes; ++i)
7301 if (buffer[i] != buffer2[i])
7302 return NULL_RTX;
7304 return ret;
7307 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7308 Return 0 if no simplifications are possible. */
7310 simplify_context::simplify_subreg (machine_mode outermode, rtx op,
7311 machine_mode innermode, poly_uint64 byte)
7313 /* Little bit of sanity checking. */
7314 gcc_assert (innermode != VOIDmode);
7315 gcc_assert (outermode != VOIDmode);
7316 gcc_assert (innermode != BLKmode);
7317 gcc_assert (outermode != BLKmode);
7319 gcc_assert (GET_MODE (op) == innermode
7320 || GET_MODE (op) == VOIDmode);
7322 poly_uint64 outersize = GET_MODE_SIZE (outermode);
7323 if (!multiple_p (byte, outersize))
7324 return NULL_RTX;
7326 poly_uint64 innersize = GET_MODE_SIZE (innermode);
7327 if (maybe_ge (byte, innersize))
7328 return NULL_RTX;
7330 if (outermode == innermode && known_eq (byte, 0U))
7331 return op;
7333 if (GET_CODE (op) == CONST_VECTOR)
7334 byte = simplify_const_vector_byte_offset (op, byte);
7336 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
7338 rtx elt;
7340 if (VECTOR_MODE_P (outermode)
7341 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
7342 && vec_duplicate_p (op, &elt))
7343 return gen_vec_duplicate (outermode, elt);
7345 if (outermode == GET_MODE_INNER (innermode)
7346 && vec_duplicate_p (op, &elt))
7347 return elt;
7350 if (CONST_SCALAR_INT_P (op)
7351 || CONST_DOUBLE_AS_FLOAT_P (op)
7352 || CONST_FIXED_P (op)
7353 || GET_CODE (op) == CONST_VECTOR)
7355 unsigned HOST_WIDE_INT cbyte;
7356 if (byte.is_constant (&cbyte))
7358 if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
7360 rtx tmp = simplify_const_vector_subreg (outermode, op,
7361 innermode, cbyte);
7362 if (tmp)
7363 return tmp;
7366 fixed_size_mode fs_outermode;
7367 if (is_a <fixed_size_mode> (outermode, &fs_outermode))
7368 return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
7372 /* Changing mode twice with SUBREG => just change it once,
7373 or not at all if changing back to OP's starting mode. */
7374 if (GET_CODE (op) == SUBREG)
7376 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7377 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7378 rtx newx;
7380 if (outermode == innermostmode
7381 && known_eq (byte, 0U)
7382 && known_eq (SUBREG_BYTE (op), 0))
7383 return SUBREG_REG (op);
7385 /* Work out the memory offset of the final OUTERMODE value relative
7386 to the inner value of OP. */
7387 poly_int64 mem_offset = subreg_memory_offset (outermode,
7388 innermode, byte);
7389 poly_int64 op_mem_offset = subreg_memory_offset (op);
7390 poly_int64 final_offset = mem_offset + op_mem_offset;
7392 /* See whether resulting subreg will be paradoxical. */
7393 if (!paradoxical_subreg_p (outermode, innermostmode))
7395 /* Bail out in case resulting subreg would be incorrect. */
7396 if (maybe_lt (final_offset, 0)
7397 || maybe_ge (poly_uint64 (final_offset), innermostsize)
7398 || !multiple_p (final_offset, outersize))
7399 return NULL_RTX;
7401 else
7403 poly_int64 required_offset = subreg_memory_offset (outermode,
7404 innermostmode, 0);
7405 if (maybe_ne (final_offset, required_offset))
7406 return NULL_RTX;
7407 /* Paradoxical subregs always have byte offset 0. */
7408 final_offset = 0;
7411 /* Recurse for further possible simplifications. */
7412 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7413 final_offset);
7414 if (newx)
7415 return newx;
7416 if (validate_subreg (outermode, innermostmode,
7417 SUBREG_REG (op), final_offset))
7419 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7420 if (SUBREG_PROMOTED_VAR_P (op)
7421 && SUBREG_PROMOTED_SIGN (op) >= 0
7422 && GET_MODE_CLASS (outermode) == MODE_INT
7423 && known_ge (outersize, innersize)
7424 && known_le (outersize, innermostsize)
7425 && subreg_lowpart_p (newx))
7427 SUBREG_PROMOTED_VAR_P (newx) = 1;
7428 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7430 return newx;
7432 return NULL_RTX;
7435 /* SUBREG of a hard register => just change the register number
7436 and/or mode. If the hard register is not valid in that mode,
7437 suppress this simplification. If the hard register is the stack,
7438 frame, or argument pointer, leave this as a SUBREG. */
7440 if (REG_P (op) && HARD_REGISTER_P (op))
7442 unsigned int regno, final_regno;
7444 regno = REGNO (op);
7445 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7446 if (HARD_REGISTER_NUM_P (final_regno))
7448 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7449 subreg_memory_offset (outermode,
7450 innermode, byte));
7452 /* Propagate original regno. We don't have any way to specify
7453 the offset inside original regno, so do so only for lowpart.
7454 The information is used only by alias analysis, which cannot
7455 grok partial registers anyway. */
7457 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7458 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7459 return x;
7463 /* If we have a SUBREG of a register that we are replacing and we are
7464 replacing it with a MEM, make a new MEM and try replacing the
7465 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7466 or if we would be widening it. */
7468 if (MEM_P (op)
7469 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7470 /* Allow splitting of volatile memory references in case we don't
7471 have an instruction to move the whole thing. */
7472 && (! MEM_VOLATILE_P (op)
7473 || ! have_insn_for (SET, innermode))
7474 && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
7475 && known_le (outersize, innersize))
7476 return adjust_address_nv (op, outermode, byte);
7478 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7479 of two parts. */
7480 if (GET_CODE (op) == CONCAT
7481 || GET_CODE (op) == VEC_CONCAT)
7483 poly_uint64 final_offset;
7484 rtx part, res;
7486 machine_mode part_mode = GET_MODE (XEXP (op, 0));
7487 if (part_mode == VOIDmode)
7488 part_mode = GET_MODE_INNER (GET_MODE (op));
7489 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7490 if (known_lt (byte, part_size))
7492 part = XEXP (op, 0);
7493 final_offset = byte;
7495 else if (known_ge (byte, part_size))
7497 part = XEXP (op, 1);
7498 final_offset = byte - part_size;
7500 else
7501 return NULL_RTX;
7503 if (maybe_gt (final_offset + outersize, part_size))
7504 return NULL_RTX;
7506 part_mode = GET_MODE (part);
7507 if (part_mode == VOIDmode)
7508 part_mode = GET_MODE_INNER (GET_MODE (op));
7509 res = simplify_subreg (outermode, part, part_mode, final_offset);
7510 if (res)
7511 return res;
7512 if (validate_subreg (outermode, part_mode, part, final_offset))
7513 return gen_rtx_SUBREG (outermode, part, final_offset);
7514 return NULL_RTX;
7517 /* Simplify
7518 (subreg (vec_merge (X)
7519 (vector)
7520 (const_int ((1 << N) | M)))
7521 (N * sizeof (outermode)))
7522 to
7523 (subreg (X) (N * sizeof (outermode)))
7524 */
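/* Illustrative example with 4-byte elements: in
   (subreg:SI (vec_merge:V4SI (reg:V4SI x) (reg:V4SI y) (const_int 2)) 4)
   the subreg reads element 1 and mask bit 1 is set, so element 1 comes
   from X and the whole expression becomes (subreg:SI (reg:V4SI x) 4). */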
7525 unsigned int idx;
7526 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7527 && idx < HOST_BITS_PER_WIDE_INT
7528 && GET_CODE (op) == VEC_MERGE
7529 && GET_MODE_INNER (innermode) == outermode
7530 && CONST_INT_P (XEXP (op, 2))
7531 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7532 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7534 /* A SUBREG resulting from a zero extension may fold to zero if
7535 it extracts higher bits than the ZERO_EXTEND's source bits. */
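/* For instance, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) selects bits 32-63 of the
   zero-extended value, all of which are known to be zero, so the SUBREG
   folds to (const_int 0).  (Illustrative.) */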
7536 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7538 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7539 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7540 return CONST0_RTX (outermode);
7543 scalar_int_mode int_outermode, int_innermode;
7544 if (is_a <scalar_int_mode> (outermode, &int_outermode)
7545 && is_a <scalar_int_mode> (innermode, &int_innermode)
7546 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7548 /* Handle polynomial integers. The upper bits of a paradoxical
7549 subreg are undefined, so this is safe regardless of whether
7550 we're truncating or extending. */
7551 if (CONST_POLY_INT_P (op))
7553 poly_wide_int val
7554 = poly_wide_int::from (const_poly_int_value (op),
7555 GET_MODE_PRECISION (int_outermode),
7556 SIGNED);
7557 return immed_wide_int_const (val, int_outermode);
7560 if (GET_MODE_PRECISION (int_outermode)
7561 < GET_MODE_PRECISION (int_innermode))
7563 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7564 if (tem)
7565 return tem;
7569 /* If OP is a vector comparison and the subreg is not changing the
7570 number of elements or the size of the elements, change the result
7571 of the comparison to the new mode. */
7572 if (COMPARISON_P (op)
7573 && VECTOR_MODE_P (outermode)
7574 && VECTOR_MODE_P (innermode)
7575 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7576 && known_eq (GET_MODE_UNIT_SIZE (outermode),
7577 GET_MODE_UNIT_SIZE (innermode)))
7578 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7579 XEXP (op, 0), XEXP (op, 1));
7580 return NULL_RTX;
7583 /* Make a SUBREG operation or equivalent if it folds. */
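/* Returns the result of simplify_subreg when it succeeds; otherwise, for
   operands where a literal SUBREG is still meaningful, generates one if
   validate_subreg accepts it, and returns NULL_RTX in all other cases. */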
7586 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
7587 machine_mode innermode,
7588 poly_uint64 byte)
7590 rtx newx;
7592 newx = simplify_subreg (outermode, op, innermode, byte);
7593 if (newx)
7594 return newx;
7596 if (GET_CODE (op) == SUBREG
7597 || GET_CODE (op) == CONCAT
7598 || GET_MODE (op) == VOIDmode)
7599 return NULL_RTX;
7601 if (MODE_COMPOSITE_P (outermode)
7602 && (CONST_SCALAR_INT_P (op)
7603 || CONST_DOUBLE_AS_FLOAT_P (op)
7604 || CONST_FIXED_P (op)
7605 || GET_CODE (op) == CONST_VECTOR))
7606 return NULL_RTX;
7608 if (validate_subreg (outermode, innermode, op, byte))
7609 return gen_rtx_SUBREG (outermode, op, byte);
7611 return NULL_RTX;
7614 /* Generates a subreg to get the least significant part of EXPR (in mode
7615 INNER_MODE) to OUTER_MODE. */
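/* E.g. lowpart_subreg (SImode, x, DImode) requests the least significant
   32 bits of a DImode value, i.e. byte offset 0 on little-endian targets
   and byte offset 4 on big-endian targets.  (Illustrative.) */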
7618 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
7619 machine_mode inner_mode)
7621 return simplify_gen_subreg (outer_mode, expr, inner_mode,
7622 subreg_lowpart_offset (outer_mode, inner_mode));
7625 /* Generate RTX to select element at INDEX out of vector OP. */
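/* E.g. for a V4SImode OP, index 0 becomes a lowpart subreg when that byte
   offset matches subreg_lowpart_offset; otherwise the element is selected
   with (vec_select:SI op (parallel [(const_int 0)])).  (Illustrative.) */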
7628 simplify_context::simplify_gen_vec_select (rtx op, unsigned int index)
7630 gcc_assert (VECTOR_MODE_P (GET_MODE (op)));
7632 scalar_mode imode = GET_MODE_INNER (GET_MODE (op));
7634 if (known_eq (index * GET_MODE_SIZE (imode),
7635 subreg_lowpart_offset (imode, GET_MODE (op))))
7637 rtx res = lowpart_subreg (imode, op, GET_MODE (op));
7638 if (res)
7639 return res;
7642 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (index)));
7643 return gen_rtx_VEC_SELECT (imode, op, tmp);
7647 /* Simplify X, an rtx expression.
7649 Return the simplified expression or NULL if no simplifications
7650 were possible.
7652 This is the preferred entry point into the simplification routines;
7653 however, we still allow passes to call the more specific routines.
7655 Right now GCC has three (yes, three) major bodies of RTL simplification
7656 code that need to be unified.
7658 1. fold_rtx in cse.c. This code uses various CSE specific
7659 information to aid in RTL simplification.
7661 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7662 it uses combine specific information to aid in RTL
7663 simplification.
7665 3. The routines in this file.
7668 Long term we want to only have one body of simplification code; to
7669 get to that state I recommend the following steps:
7671 1. Pore over fold_rtx & simplify_rtx and move any simplifications
7672 that do not depend on pass-specific state into these routines.
7674 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7675 use this routine whenever possible.
7677 3. Allow for pass dependent state to be provided to these
7678 routines and add simplifications based on the pass dependent
7679 state. Remove code from cse.c & combine.c that becomes
7680 redundant/dead.
7682 It will take time, but ultimately the compiler will be easier to
7683 maintain and improve. It's totally silly that when we add a
7684 simplification it needs to be added to 4 places (3 for RTL
7685 simplification and 1 for tree simplification). */
7688 simplify_rtx (const_rtx x)
7690 const enum rtx_code code = GET_CODE (x);
7691 const machine_mode mode = GET_MODE (x);
7693 switch (GET_RTX_CLASS (code))
7695 case RTX_UNARY:
7696 return simplify_unary_operation (code, mode,
7697 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7698 case RTX_COMM_ARITH:
7699 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7700 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7702 /* Fall through. */
7704 case RTX_BIN_ARITH:
7705 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7707 case RTX_TERNARY:
7708 case RTX_BITFIELD_OPS:
7709 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7710 XEXP (x, 0), XEXP (x, 1),
7711 XEXP (x, 2));
7713 case RTX_COMPARE:
7714 case RTX_COMM_COMPARE:
7715 return simplify_relational_operation (code, mode,
7716 ((GET_MODE (XEXP (x, 0))
7717 != VOIDmode)
7718 ? GET_MODE (XEXP (x, 0))
7719 : GET_MODE (XEXP (x, 1))),
7720 XEXP (x, 0),
7721 XEXP (x, 1));
7723 case RTX_EXTRA:
7724 if (code == SUBREG)
7725 return simplify_subreg (mode, SUBREG_REG (x),
7726 GET_MODE (SUBREG_REG (x)),
7727 SUBREG_BYTE (x));
7728 break;
7730 case RTX_OBJ:
7731 if (code == LO_SUM)
7733 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7734 if (GET_CODE (XEXP (x, 0)) == HIGH
7735 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7736 return XEXP (x, 1);
7738 break;
7740 default:
7741 break;
7743 return NULL;
7746 #if CHECKING_P
7748 namespace selftest {
7750 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7752 static rtx
7753 make_test_reg (machine_mode mode)
7755 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7757 return gen_rtx_REG (mode, test_reg_num++);
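/* Exercise some basic scalar integer simplifications on pseudo registers
   of mode MODE. */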
7760 static void
7761 test_scalar_int_ops (machine_mode mode)
7763 rtx op0 = make_test_reg (mode);
7764 rtx op1 = make_test_reg (mode);
7765 rtx six = GEN_INT (6);
7767 rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7768 rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7769 rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7771 rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7772 rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7773 rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7775 rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7776 rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7778 /* Test some binary identities. */
7779 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7780 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7781 ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7782 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7783 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7784 ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7785 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7786 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7787 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7788 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7789 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7790 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7791 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7792 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7793 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7794 ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7795 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7797 /* Test some self-inverse operations. */
7798 ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7799 ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7800 ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7802 /* Test some idempotent operations. */
7803 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7804 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7805 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7806 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7807 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7808 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7810 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7811 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7813 /* Test simplify_distributive_operation. */
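/* i.e. check that (X op Y) & 6 is rewritten as (X & 6) op (Y & 6)
   for op in { XOR, IOR, AND }. */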
7814 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7815 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7816 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7817 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7818 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7819 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7821 /* Test that useless extensions are eliminated. */
7822 ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
7823 ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
7824 ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
7825 ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
7828 /* Verify some simplifications of integer extension/truncation.
7829 Machine mode BMODE is guaranteed to be wider than SMODE. */
7831 static void
7832 test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
7834 rtx sreg = make_test_reg (smode);
7836 /* Check truncation of extension. */
7837 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7838 simplify_gen_unary (ZERO_EXTEND, bmode,
7839 sreg, smode),
7840 bmode),
7841 sreg);
7842 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7843 simplify_gen_unary (SIGN_EXTEND, bmode,
7844 sreg, smode),
7845 bmode),
7846 sreg);
7847 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7848 lowpart_subreg (bmode, sreg, smode),
7849 bmode),
7850 sreg);
7853 /* Verify more simplifications of integer extension/truncation.
7854 BMODE is wider than MMODE, which is wider than SMODE. */
7856 static void
7857 test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
7858 machine_mode smode)
7860 rtx breg = make_test_reg (bmode);
7861 rtx mreg = make_test_reg (mmode);
7862 rtx sreg = make_test_reg (smode);
7864 /* Check truncate of truncate. */
7865 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7866 simplify_gen_unary (TRUNCATE, mmode,
7867 breg, bmode),
7868 mmode),
7869 simplify_gen_unary (TRUNCATE, smode, breg, bmode));
7871 /* Check extension of extension. */
7872 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
7873 simplify_gen_unary (ZERO_EXTEND, mmode,
7874 sreg, smode),
7875 mmode),
7876 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7877 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7878 simplify_gen_unary (SIGN_EXTEND, mmode,
7879 sreg, smode),
7880 mmode),
7881 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
7882 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7883 simplify_gen_unary (ZERO_EXTEND, mmode,
7884 sreg, smode),
7885 mmode),
7886 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7888 /* Check truncation of extension. */
7889 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7890 simplify_gen_unary (ZERO_EXTEND, bmode,
7891 mreg, mmode),
7892 bmode),
7893 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7894 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7895 simplify_gen_unary (SIGN_EXTEND, bmode,
7896 mreg, mmode),
7897 bmode),
7898 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7899 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7900 lowpart_subreg (bmode, mreg, mmode),
7901 bmode),
7902 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7906 /* Verify some simplifications involving scalar expressions. */
7908 static void
7909 test_scalar_ops ()
7911 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7913 machine_mode mode = (machine_mode) i;
7914 if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7915 test_scalar_int_ops (mode);
7918 test_scalar_int_ext_ops (HImode, QImode);
7919 test_scalar_int_ext_ops (SImode, QImode);
7920 test_scalar_int_ext_ops (SImode, HImode);
7921 test_scalar_int_ext_ops (DImode, QImode);
7922 test_scalar_int_ext_ops (DImode, HImode);
7923 test_scalar_int_ext_ops (DImode, SImode);
7925 test_scalar_int_ext_ops2 (SImode, HImode, QImode);
7926 test_scalar_int_ext_ops2 (DImode, HImode, QImode);
7927 test_scalar_int_ext_ops2 (DImode, SImode, QImode);
7928 test_scalar_int_ext_ops2 (DImode, SImode, HImode);
7931 /* Test vector simplifications involving VEC_DUPLICATE in which the
7932 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7933 register that holds one element of MODE. */
7935 static void
7936 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7938 scalar_mode inner_mode = GET_MODE_INNER (mode);
7939 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7940 poly_uint64 nunits = GET_MODE_NUNITS (mode);
7941 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7943 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7944 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7945 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7946 ASSERT_RTX_EQ (duplicate,
7947 simplify_unary_operation (NOT, mode,
7948 duplicate_not, mode));
7950 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7951 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7952 ASSERT_RTX_EQ (duplicate,
7953 simplify_unary_operation (NEG, mode,
7954 duplicate_neg, mode));
7956 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7957 ASSERT_RTX_EQ (duplicate,
7958 simplify_binary_operation (PLUS, mode, duplicate,
7959 CONST0_RTX (mode)));
7961 ASSERT_RTX_EQ (duplicate,
7962 simplify_binary_operation (MINUS, mode, duplicate,
7963 CONST0_RTX (mode)));
7965 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7966 simplify_binary_operation (MINUS, mode, duplicate,
7967 duplicate));
7970 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7971 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7972 ASSERT_RTX_PTR_EQ (scalar_reg,
7973 simplify_binary_operation (VEC_SELECT, inner_mode,
7974 duplicate, zero_par));
7976 unsigned HOST_WIDE_INT const_nunits;
7977 if (nunits.is_constant (&const_nunits))
7979 /* And again with the final element. */
7980 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7981 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7982 ASSERT_RTX_PTR_EQ (scalar_reg,
7983 simplify_binary_operation (VEC_SELECT, inner_mode,
7984 duplicate, last_par));
7986 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
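/* Each iteration sets bit I in the mask ((1 << i) | (i + 1)), so element I
   of the VEC_MERGE is taken from DUPLICATE and the subreg at that element
   must fold back to SCALAR_REG. */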
7987 rtx vector_reg = make_test_reg (mode);
7988 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7990 if (i >= HOST_BITS_PER_WIDE_INT)
7991 break;
7992 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7993 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7994 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7995 ASSERT_RTX_EQ (scalar_reg,
7996 simplify_gen_subreg (inner_mode, vm,
7997 mode, offset));
8001 /* Test a scalar subreg of a VEC_DUPLICATE. */
8002 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
8003 ASSERT_RTX_EQ (scalar_reg,
8004 simplify_gen_subreg (inner_mode, duplicate,
8005 mode, offset));
8007 machine_mode narrower_mode;
8008 if (maybe_ne (nunits, 2U)
8009 && multiple_p (nunits, 2)
8010 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
8011 && VECTOR_MODE_P (narrower_mode))
8013 /* Test VEC_DUPLICATE of a vector. */
8014 rtx_vector_builder nbuilder (narrower_mode, 2, 1);
8015 nbuilder.quick_push (const0_rtx);
8016 nbuilder.quick_push (const1_rtx);
8017 rtx_vector_builder builder (mode, 2, 1);
8018 builder.quick_push (const0_rtx);
8019 builder.quick_push (const1_rtx);
8020 ASSERT_RTX_EQ (builder.build (),
8021 simplify_unary_operation (VEC_DUPLICATE, mode,
8022 nbuilder.build (),
8023 narrower_mode));
8025 /* Test VEC_SELECT of a vector. */
8026 rtx vec_par
8027 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
8028 rtx narrower_duplicate
8029 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
8030 ASSERT_RTX_EQ (narrower_duplicate,
8031 simplify_binary_operation (VEC_SELECT, narrower_mode,
8032 duplicate, vec_par));
8034 /* Test a vector subreg of a VEC_DUPLICATE. */
8035 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
8036 ASSERT_RTX_EQ (narrower_duplicate,
8037 simplify_gen_subreg (narrower_mode, duplicate,
8038 mode, offset));
8042 /* Test vector simplifications involving VEC_SERIES in which the
8043 operands and result have vector mode MODE. SCALAR_REG is a pseudo
8044 register that holds one element of MODE. */
8046 static void
8047 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
8049 /* Test unary cases with VEC_SERIES arguments. */
8050 scalar_mode inner_mode = GET_MODE_INNER (mode);
8051 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
8052 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
8053 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
8054 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
8055 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
8056 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
8057 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
8058 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
8059 neg_scalar_reg);
8060 ASSERT_RTX_EQ (series_0_r,
8061 simplify_unary_operation (NEG, mode, series_0_nr, mode));
8062 ASSERT_RTX_EQ (series_r_m1,
8063 simplify_unary_operation (NEG, mode, series_nr_1, mode));
8064 ASSERT_RTX_EQ (series_r_r,
8065 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
8067 /* Test that a VEC_SERIES with a zero step is simplified away. */
8068 ASSERT_RTX_EQ (duplicate,
8069 simplify_binary_operation (VEC_SERIES, mode,
8070 scalar_reg, const0_rtx));
8072 /* Test PLUS and MINUS with VEC_SERIES. */
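/* Element-wise, (0 + i*R) + R == R + i*R, so VEC_SERIES (0, R) plus
   VEC_DUPLICATE (R) must simplify to VEC_SERIES (R, R); the remaining
   asserts check analogous base/step arithmetic. */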
8073 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
8074 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
8075 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
8076 ASSERT_RTX_EQ (series_r_r,
8077 simplify_binary_operation (PLUS, mode, series_0_r,
8078 duplicate));
8079 ASSERT_RTX_EQ (series_r_1,
8080 simplify_binary_operation (PLUS, mode, duplicate,
8081 series_0_1));
8082 ASSERT_RTX_EQ (series_r_m1,
8083 simplify_binary_operation (PLUS, mode, duplicate,
8084 series_0_m1));
8085 ASSERT_RTX_EQ (series_0_r,
8086 simplify_binary_operation (MINUS, mode, series_r_r,
8087 duplicate));
8088 ASSERT_RTX_EQ (series_r_m1,
8089 simplify_binary_operation (MINUS, mode, duplicate,
8090 series_0_1));
8091 ASSERT_RTX_EQ (series_r_1,
8092 simplify_binary_operation (MINUS, mode, duplicate,
8093 series_0_m1));
8094 ASSERT_RTX_EQ (series_0_m1,
8095 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
8096 constm1_rtx));
8098 /* Test NEG on constant vector series. */
8099 ASSERT_RTX_EQ (series_0_m1,
8100 simplify_unary_operation (NEG, mode, series_0_1, mode));
8101 ASSERT_RTX_EQ (series_0_1,
8102 simplify_unary_operation (NEG, mode, series_0_m1, mode));
8104 /* Test PLUS and MINUS on constant vector series. */
8105 rtx scalar2 = gen_int_mode (2, inner_mode);
8106 rtx scalar3 = gen_int_mode (3, inner_mode);
8107 rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
8108 rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
8109 rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
8110 ASSERT_RTX_EQ (series_1_1,
8111 simplify_binary_operation (PLUS, mode, series_0_1,
8112 CONST1_RTX (mode)));
8113 ASSERT_RTX_EQ (series_0_m1,
8114 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
8115 series_0_m1));
8116 ASSERT_RTX_EQ (series_1_3,
8117 simplify_binary_operation (PLUS, mode, series_1_1,
8118 series_0_2));
8119 ASSERT_RTX_EQ (series_0_1,
8120 simplify_binary_operation (MINUS, mode, series_1_1,
8121 CONST1_RTX (mode)));
8122 ASSERT_RTX_EQ (series_1_1,
8123 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
8124 series_0_m1));
8125 ASSERT_RTX_EQ (series_1_1,
8126 simplify_binary_operation (MINUS, mode, series_1_3,
8127 series_0_2));
8129 /* Test MULT between constant vectors. */
8130 rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
8131 rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
8132 rtx scalar9 = gen_int_mode (9, inner_mode);
8133 rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
8134 ASSERT_RTX_EQ (series_0_2,
8135 simplify_binary_operation (MULT, mode, series_0_1, vec2));
8136 ASSERT_RTX_EQ (series_3_9,
8137 simplify_binary_operation (MULT, mode, vec3, series_1_3));
8138 if (!GET_MODE_NUNITS (mode).is_constant ())
8139 ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
8140 series_0_1));
8142 /* Test ASHIFT between constant vectors. */
8143 ASSERT_RTX_EQ (series_0_2,
8144 simplify_binary_operation (ASHIFT, mode, series_0_1,
8145 CONST1_RTX (mode)));
8146 if (!GET_MODE_NUNITS (mode).is_constant ())
8147 ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
8148 series_0_1));
8151 static rtx
8152 simplify_merge_mask (rtx x, rtx mask, int op)
8154 return simplify_context ().simplify_merge_mask (x, mask, op);
8157 /* Verify simplify_merge_mask works correctly. */
8159 static void
8160 test_vec_merge (machine_mode mode)
8162 rtx op0 = make_test_reg (mode);
8163 rtx op1 = make_test_reg (mode);
8164 rtx op2 = make_test_reg (mode);
8165 rtx op3 = make_test_reg (mode);
8166 rtx op4 = make_test_reg (mode);
8167 rtx op5 = make_test_reg (mode);
8168 rtx mask1 = make_test_reg (SImode);
8169 rtx mask2 = make_test_reg (SImode);
8170 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
8171 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
8172 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
8174 /* Simple vec_merge. */
8175 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
8176 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
8177 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
8178 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
8180 /* Nested vec_merge.
8181 It's tempting to make this simplify right down to opN, but we don't
8182 because all the simplify_* functions assume that the operands have
8183 already been simplified. */
8184 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
8185 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
8186 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
8188 /* Intermediate unary op. */
8189 rtx unop = gen_rtx_NOT (mode, vm1);
8190 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
8191 simplify_merge_mask (unop, mask1, 0));
8192 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
8193 simplify_merge_mask (unop, mask1, 1));
8195 /* Intermediate binary op. */
8196 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
8197 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
8198 simplify_merge_mask (binop, mask1, 0));
8199 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
8200 simplify_merge_mask (binop, mask1, 1));
8202 /* Intermediate ternary op. */
8203 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
8204 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
8205 simplify_merge_mask (tenop, mask1, 0));
8206 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
8207 simplify_merge_mask (tenop, mask1, 1));
8209 /* Side effects. */
8210 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
8211 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
8212 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
8213 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
8215 /* Called indirectly. */
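/* simplify_rtx dispatches the VEC_MERGE to simplify_ternary_operation,
   which applies simplify_merge_mask to each arm: the shared mask selects
   op0 from VM1 and op3 from VM2. */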
8216 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
8217 simplify_rtx (nvm));
8220 /* Test subregs of integer vector constant X, trying elements in
8221 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
8222 where NELTS is the number of elements in X. Subregs involving
8223 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
8225 static void
8226 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
8227 unsigned int first_valid = 0)
8229 machine_mode inner_mode = GET_MODE (x);
8230 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8232 for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
8234 machine_mode outer_mode = (machine_mode) modei;
8235 if (!VECTOR_MODE_P (outer_mode))
8236 continue;
8238 unsigned int outer_nunits;
8239 if (GET_MODE_INNER (outer_mode) == int_mode
8240 && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
8241 && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
8243 /* Test subregs in which the outer mode is a smaller,
8244 constant-sized vector of the same element type. */
8245 unsigned int limit
8246 = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
8247 for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
8249 rtx expected = NULL_RTX;
8250 if (elt >= first_valid)
8252 rtx_vector_builder builder (outer_mode, outer_nunits, 1);
8253 for (unsigned int i = 0; i < outer_nunits; ++i)
8254 builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
8255 expected = builder.build ();
8257 poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
8258 ASSERT_RTX_EQ (expected,
8259 simplify_subreg (outer_mode, x,
8260 inner_mode, byte));
8263 else if (known_eq (GET_MODE_SIZE (outer_mode),
8264 GET_MODE_SIZE (inner_mode))
8265 && known_eq (elt_bias, 0U)
8266 && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
8267 || known_eq (GET_MODE_BITSIZE (outer_mode),
8268 GET_MODE_NUNITS (outer_mode)))
8269 && (!FLOAT_MODE_P (outer_mode)
8270 || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
8271 == GET_MODE_UNIT_PRECISION (outer_mode)))
8272 && (GET_MODE_SIZE (inner_mode).is_constant ()
8273 || !CONST_VECTOR_STEPPED_P (x)))
8275 /* Try converting to OUTER_MODE and back. */
8276 rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
8277 ASSERT_TRUE (outer_x != NULL_RTX);
8278 ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
8279 outer_mode, 0));
8283 if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
8285 /* Test each byte in the element range. */
8286 unsigned int limit
8287 = constant_lower_bound (GET_MODE_SIZE (inner_mode));
8288 for (unsigned int i = 0; i < limit; ++i)
8290 unsigned int elt = i / GET_MODE_SIZE (int_mode);
8291 rtx expected = NULL_RTX;
8292 if (elt >= first_valid)
8294 unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
8295 if (BYTES_BIG_ENDIAN)
8296 byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
8297 rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
8298 wide_int shifted_elt
8299 = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
8300 expected = immed_wide_int_const (shifted_elt, QImode);
8302 poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
8303 ASSERT_RTX_EQ (expected,
8304 simplify_subreg (QImode, x, inner_mode, byte));
8309 /* Test constant subregs of integer vector mode INNER_MODE, using 1
8310 element per pattern. */
8312 static void
8313 test_vector_subregs_repeating (machine_mode inner_mode)
8315 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8316 unsigned int min_nunits = constant_lower_bound (nunits);
8317 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8318 unsigned int count = gcd (min_nunits, 8);
8320 rtx_vector_builder builder (inner_mode, count, 1);
8321 for (unsigned int i = 0; i < count; ++i)
8322 builder.quick_push (gen_int_mode (8 - i, int_mode));
8323 rtx x = builder.build ();
8325 test_vector_subregs_modes (x);
8326 if (!nunits.is_constant ())
8327 test_vector_subregs_modes (x, nunits - min_nunits);
8330 /* Test constant subregs of integer vector mode INNER_MODE, using 2
8331 elements per pattern. */
8333 static void
8334 test_vector_subregs_fore_back (machine_mode inner_mode)
8336 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8337 unsigned int min_nunits = constant_lower_bound (nunits);
8338 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8339 unsigned int count = gcd (min_nunits, 4);
8341 rtx_vector_builder builder (inner_mode, count, 2);
8342 for (unsigned int i = 0; i < count; ++i)
8343 builder.quick_push (gen_int_mode (i, int_mode));
8344 for (unsigned int i = 0; i < count; ++i)
8345 builder.quick_push (gen_int_mode (-(int) i, int_mode));
8346 rtx x = builder.build ();
8348 test_vector_subregs_modes (x);
8349 if (!nunits.is_constant ())
8350 test_vector_subregs_modes (x, nunits - min_nunits, count);
8353 /* Test constant subregs of integer vector mode INNER_MODE, using 3
8354 elements per pattern. */
8356 static void
8357 test_vector_subregs_stepped (machine_mode inner_mode)
8359 /* Build { 0, 1, 2, 3, ... }. */
8360 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8361 rtx_vector_builder builder (inner_mode, 1, 3);
8362 for (unsigned int i = 0; i < 3; ++i)
8363 builder.quick_push (gen_int_mode (i, int_mode));
8364 rtx x = builder.build ();
8366 test_vector_subregs_modes (x);
8369 /* Test constant subregs of integer vector mode INNER_MODE. */
8371 static void
8372 test_vector_subregs (machine_mode inner_mode)
8374 test_vector_subregs_repeating (inner_mode);
8375 test_vector_subregs_fore_back (inner_mode);
8376 test_vector_subregs_stepped (inner_mode);
8379 /* Verify some simplifications involving vectors. */
8381 static void
8382 test_vector_ops ()
8384 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
8386 machine_mode mode = (machine_mode) i;
8387 if (VECTOR_MODE_P (mode))
8389 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
8390 test_vector_ops_duplicate (mode, scalar_reg);
8391 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
8392 && maybe_gt (GET_MODE_NUNITS (mode), 2))
8394 test_vector_ops_series (mode, scalar_reg);
8395 test_vector_subregs (mode);
8397 test_vec_merge (mode);
8402 template<unsigned int N>
8403 struct simplify_const_poly_int_tests
8405 static void run ();
8408 template<>
8409 struct simplify_const_poly_int_tests<1>
8411 static void run () {}
8414 /* Test various CONST_POLY_INT properties. */
8416 template<unsigned int N>
8417 void
8418 simplify_const_poly_int_tests<N>::run ()
8420 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
8421 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
8422 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
8423 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
8424 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
8425 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
8426 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
8427 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
8428 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
8429 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
8430 rtx two = GEN_INT (2);
8431 rtx six = GEN_INT (6);
8432 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
8434 /* These tests only try limited operation combinations. Fuller arithmetic
8435 testing is done directly on poly_ints. */
8436 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
8437 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
8438 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
8439 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
8440 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
8441 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
8442 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
8443 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
8444 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
8445 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
8446 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
8449 /* Run all of the selftests within this file. */
8451 void
8452 simplify_rtx_c_tests ()
8454 test_scalar_ops ();
8455 test_vector_ops ();
8456 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
8459 } // namespace selftest
8461 #endif /* CHECKING_P */