Add a VEC_SERIES rtl code
[official-gcc.git] / gcc / simplify-rtx.c
blob 8bee8edc870f0ee50e6d30d5ddba334c361b6e4c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
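/* For example (assuming a 64-bit HOST_WIDE_INT):

     HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63)  == HOST_WIDE_INT_M1
     HWI_SIGN_EXTEND (42)                      == 0

   i.e. the high half of the pair is all ones exactly when LOW is
   negative when viewed as a signed wide int.  */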
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
52 unsigned int);
53 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
54 rtx, rtx);
55 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
56 machine_mode, rtx, rtx);
57 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
58 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
59 rtx, rtx, rtx, rtx);
61 /* Negate a CONST_INT rtx. */
62 static rtx
63 neg_const_int (machine_mode mode, const_rtx i)
65 unsigned HOST_WIDE_INT val = -UINTVAL (i);
67 if (!HWI_COMPUTABLE_MODE_P (mode)
68 && val == UINTVAL (i))
69 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
70 mode);
71 return gen_int_mode (val, mode);
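/* Usage sketch: neg_const_int (SImode, GEN_INT (5)) yields (const_int -5).
   The HWI_COMPUTABLE_MODE_P check above hands the case where MODE is wider
   than a HOST_WIDE_INT and the value is its own negation in HOST_WIDE_INT
   arithmetic (zero or the most negative value) off to
   simplify_const_unary_operation, so the result can be represented
   correctly, possibly as a CONST_WIDE_INT.  */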
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82 scalar_int_mode int_mode;
84 if (!is_int_mode (mode, &int_mode))
85 return false;
87 width = GET_MODE_PRECISION (int_mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && CONST_INT_P (x))
93 val = INTVAL (x);
94 #if TARGET_SUPPORTS_WIDE_INT
95 else if (CONST_WIDE_INT_P (x))
97 unsigned int i;
98 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
99 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
100 return false;
101 for (i = 0; i < elts - 1; i++)
102 if (CONST_WIDE_INT_ELT (x, i) != 0)
103 return false;
104 val = CONST_WIDE_INT_ELT (x, elts - 1);
105 width %= HOST_BITS_PER_WIDE_INT;
106 if (width == 0)
107 width = HOST_BITS_PER_WIDE_INT;
109 #else
110 else if (width <= HOST_BITS_PER_DOUBLE_INT
111 && CONST_DOUBLE_AS_INT_P (x)
112 && CONST_DOUBLE_LOW (x) == 0)
114 val = CONST_DOUBLE_HIGH (x);
115 width -= HOST_BITS_PER_WIDE_INT;
117 #endif
118 else
119 /* X is not an integer constant. */
120 return false;
122 if (width < HOST_BITS_PER_WIDE_INT)
123 val &= (HOST_WIDE_INT_1U << width) - 1;
124 return val == (HOST_WIDE_INT_1U << (width - 1));
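/* For example, assuming a 64-bit HOST_WIDE_INT:
     mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))
   is true, whereas mode_signbit_p (SImode, const1_rtx) is false.  */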
127 /* Test whether VAL is equal to the most significant bit of mode MODE
128 (after masking with the mode mask of MODE). Returns false if the
129 precision of MODE is too large to handle. */
131 bool
132 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
134 unsigned int width;
135 scalar_int_mode int_mode;
137 if (!is_int_mode (mode, &int_mode))
138 return false;
140 width = GET_MODE_PRECISION (int_mode);
141 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
142 return false;
144 val &= GET_MODE_MASK (int_mode);
145 return val == (HOST_WIDE_INT_1U << (width - 1));
148 /* Test whether the most significant bit of mode MODE is set in VAL.
149 Returns false if the precision of MODE is too large to handle. */
150 bool
151 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
153 unsigned int width;
155 scalar_int_mode int_mode;
156 if (!is_int_mode (mode, &int_mode))
157 return false;
159 width = GET_MODE_PRECISION (int_mode);
160 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
161 return false;
163 val &= HOST_WIDE_INT_1U << (width - 1);
164 return val != 0;
167 /* Test whether the most significant bit of mode MODE is clear in VAL.
168 Returns false if the precision of MODE is too large to handle. */
169 bool
170 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
172 unsigned int width;
174 scalar_int_mode int_mode;
175 if (!is_int_mode (mode, &int_mode))
176 return false;
178 width = GET_MODE_PRECISION (int_mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= HOST_WIDE_INT_1U << (width - 1);
183 return val == 0;
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
rtx
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
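/* Usage sketch (register number 100 is arbitrary):

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       ==> (const_int 5)
     simplify_gen_binary (PLUS, SImode, GEN_INT (2),
			  gen_rtx_REG (SImode, 100))
       ==> (plus:SI (reg:SI 100) (const_int 2))

   i.e. constant operands fold, and otherwise commutative operands are put
   into canonical order before the new rtx is built.  */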
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
rtx
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
227 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
228 GET_MODE (x));
229 return x;
231 default:
232 return x;
235 if (GET_MODE (x) == BLKmode)
236 return x;
238 addr = XEXP (x, 0);
240 /* Call target hook to avoid the effects of -fpic etc.... */
241 addr = targetm.delegitimize_address (addr);
243 /* Split the address into a base and integer offset. */
244 if (GET_CODE (addr) == CONST
245 && GET_CODE (XEXP (addr, 0)) == PLUS
246 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
248 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
249 addr = XEXP (XEXP (addr, 0), 0);
252 if (GET_CODE (addr) == LO_SUM)
253 addr = XEXP (addr, 1);
255 /* If this is a constant pool reference, we can turn it into its
256 constant and hope that simplifications happen. */
257 if (GET_CODE (addr) == SYMBOL_REF
258 && CONSTANT_POOL_ADDRESS_P (addr))
260 c = get_pool_constant (addr);
261 cmode = get_pool_mode (addr);
263 /* If we're accessing the constant in a different mode than it was
264 originally stored, attempt to fix that up via subreg simplifications.
265 If that fails we have no choice but to return the original memory. */
266 if (offset == 0 && cmode == GET_MODE (x))
267 return c;
268 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
270 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
271 if (tem && CONSTANT_P (tem))
272 return tem;
276 return x;
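/* For example, given a constant pool entry holding the DFmode constant 1.0,
   a reference such as (mem/u/c:DF (symbol_ref:DI ("*.LC0"))) -- the pool
   label here is hypothetical -- is replaced by the corresponding
   CONST_DOUBLE, while a narrower or offset access into the pool entry is
   attempted via simplify_subreg.  */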
279 /* Simplify a MEM based on its attributes. This is the default
280 delegitimize_address target hook, and it's recommended that every
281 overrider call it. */
rtx
284 delegitimize_mem_from_attrs (rtx x)
286 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
287 use their base addresses as equivalent. */
288 if (MEM_P (x)
289 && MEM_EXPR (x)
290 && MEM_OFFSET_KNOWN_P (x))
292 tree decl = MEM_EXPR (x);
293 machine_mode mode = GET_MODE (x);
294 HOST_WIDE_INT offset = 0;
296 switch (TREE_CODE (decl))
298 default:
299 decl = NULL;
300 break;
302 case VAR_DECL:
303 break;
305 case ARRAY_REF:
306 case ARRAY_RANGE_REF:
307 case COMPONENT_REF:
308 case BIT_FIELD_REF:
309 case REALPART_EXPR:
310 case IMAGPART_EXPR:
311 case VIEW_CONVERT_EXPR:
313 HOST_WIDE_INT bitsize, bitpos;
314 tree toffset;
315 int unsignedp, reversep, volatilep = 0;
317 decl
318 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
319 &unsignedp, &reversep, &volatilep);
320 if (bitsize != GET_MODE_BITSIZE (mode)
321 || (bitpos % BITS_PER_UNIT)
322 || (toffset && !tree_fits_shwi_p (toffset)))
323 decl = NULL;
324 else
326 offset += bitpos / BITS_PER_UNIT;
327 if (toffset)
328 offset += tree_to_shwi (toffset);
330 break;
334 if (decl
335 && mode == GET_MODE (x)
336 && VAR_P (decl)
337 && (TREE_STATIC (decl)
338 || DECL_THREAD_LOCAL_P (decl))
339 && DECL_RTL_SET_P (decl)
340 && MEM_P (DECL_RTL (decl)))
342 rtx newx;
344 offset += MEM_OFFSET (x);
346 newx = DECL_RTL (decl);
348 if (MEM_P (newx))
350 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
352 /* Avoid creating a new MEM needlessly if we already had
353 the same address. We do if there's no OFFSET and the
354 old address X is identical to NEWX, or if X is of the
355 form (plus NEWX OFFSET), or the NEWX is of the form
356 (plus Y (const_int Z)) and X is that with the offset
357 added: (plus Y (const_int Z+OFFSET)). */
358 if (!((offset == 0
359 || (GET_CODE (o) == PLUS
360 && GET_CODE (XEXP (o, 1)) == CONST_INT
361 && (offset == INTVAL (XEXP (o, 1))
362 || (GET_CODE (n) == PLUS
363 && GET_CODE (XEXP (n, 1)) == CONST_INT
364 && (INTVAL (XEXP (n, 1)) + offset
365 == INTVAL (XEXP (o, 1)))
366 && (n = XEXP (n, 0))))
367 && (o = XEXP (o, 0))))
368 && rtx_equal_p (o, n)))
369 x = adjust_address_nv (newx, mode, offset);
371 else if (GET_MODE (x) == GET_MODE (newx)
372 && offset == 0)
373 x = newx;
377 return x;
380 /* Make a unary operation by first seeing if it folds and otherwise making
381 the specified operation. */
rtx
384 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
385 machine_mode op_mode)
387 rtx tem;
389 /* If this simplifies, use it. */
390 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
391 return tem;
393 return gen_rtx_fmt_e (code, mode, op);
396 /* Likewise for ternary operations. */
rtx
399 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
400 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
402 rtx tem;
404 /* If this simplifies, use it. */
405 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
406 op0, op1, op2)))
407 return tem;
409 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
412 /* Likewise, for relational operations.
413 CMP_MODE specifies mode comparison is done in. */
rtx
416 simplify_gen_relational (enum rtx_code code, machine_mode mode,
417 machine_mode cmp_mode, rtx op0, rtx op1)
419 rtx tem;
421 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
422 op0, op1)))
423 return tem;
425 return gen_rtx_fmt_ee (code, mode, op0, op1);
428 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
429 and simplify the result. If FN is non-NULL, call this callback on each
430 X; if it returns non-NULL, replace X with its return value and simplify the
431 result. */
rtx
434 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
435 rtx (*fn) (rtx, const_rtx, void *), void *data)
437 enum rtx_code code = GET_CODE (x);
438 machine_mode mode = GET_MODE (x);
439 machine_mode op_mode;
440 const char *fmt;
441 rtx op0, op1, op2, newx, op;
442 rtvec vec, newvec;
443 int i, j;
445 if (__builtin_expect (fn != NULL, 0))
447 newx = fn (x, old_rtx, data);
448 if (newx)
449 return newx;
451 else if (rtx_equal_p (x, old_rtx))
452 return copy_rtx ((rtx) data);
454 switch (GET_RTX_CLASS (code))
456 case RTX_UNARY:
457 op0 = XEXP (x, 0);
458 op_mode = GET_MODE (op0);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 if (op0 == XEXP (x, 0))
461 return x;
462 return simplify_gen_unary (code, mode, op0, op_mode);
464 case RTX_BIN_ARITH:
465 case RTX_COMM_ARITH:
466 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_binary (code, mode, op0, op1);
472 case RTX_COMPARE:
473 case RTX_COMM_COMPARE:
474 op0 = XEXP (x, 0);
475 op1 = XEXP (x, 1);
476 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
477 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
478 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
480 return x;
481 return simplify_gen_relational (code, mode, op_mode, op0, op1);
483 case RTX_TERNARY:
484 case RTX_BITFIELD_OPS:
485 op0 = XEXP (x, 0);
486 op_mode = GET_MODE (op0);
487 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
488 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
489 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
490 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
491 return x;
492 if (op_mode == VOIDmode)
493 op_mode = GET_MODE (op0);
494 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
496 case RTX_EXTRA:
497 if (code == SUBREG)
499 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
500 if (op0 == SUBREG_REG (x))
501 return x;
502 op0 = simplify_gen_subreg (GET_MODE (x), op0,
503 GET_MODE (SUBREG_REG (x)),
504 SUBREG_BYTE (x));
505 return op0 ? op0 : x;
507 break;
509 case RTX_OBJ:
510 if (code == MEM)
512 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
513 if (op0 == XEXP (x, 0))
514 return x;
515 return replace_equiv_address_nv (x, op0);
517 else if (code == LO_SUM)
519 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
520 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
522 /* (lo_sum (high x) y) -> y where x and y have the same base. */
523 if (GET_CODE (op0) == HIGH)
525 rtx base0, base1, offset0, offset1;
526 split_const (XEXP (op0, 0), &base0, &offset0);
527 split_const (op1, &base1, &offset1);
528 if (rtx_equal_p (base0, base1))
529 return op1;
532 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
533 return x;
534 return gen_rtx_LO_SUM (mode, op0, op1);
536 break;
538 default:
539 break;
542 newx = x;
543 fmt = GET_RTX_FORMAT (code);
544 for (i = 0; fmt[i]; i++)
545 switch (fmt[i])
547 case 'E':
548 vec = XVEC (x, i);
549 newvec = XVEC (newx, i);
550 for (j = 0; j < GET_NUM_ELEM (vec); j++)
552 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
553 old_rtx, fn, data);
554 if (op != RTVEC_ELT (vec, j))
556 if (newvec == vec)
558 newvec = shallow_copy_rtvec (vec);
559 if (x == newx)
560 newx = shallow_copy_rtx (x);
561 XVEC (newx, i) = newvec;
563 RTVEC_ELT (newvec, j) = op;
566 break;
568 case 'e':
569 if (XEXP (x, i))
571 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
572 if (op != XEXP (x, i))
574 if (x == newx)
575 newx = shallow_copy_rtx (x);
576 XEXP (newx, i) = op;
579 break;
581 return newx;
584 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
585 resulting RTX. Return a new RTX which is as simplified as possible. */
rtx
588 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
590 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
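/* Usage sketch (register number 100 is arbitrary):

     simplify_replace_rtx (gen_rtx_PLUS (SImode,
					 gen_rtx_REG (SImode, 100),
					 GEN_INT (4)),
			   gen_rtx_REG (SImode, 100), GEN_INT (1))

   returns (const_int 5): the register is replaced by the constant and the
   resulting PLUS of two constants is folded.  */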
593 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
594 Only handle cases where the truncated value is inherently an rvalue.
596 RTL provides two ways of truncating a value:
598 1. a lowpart subreg. This form is only a truncation when both
599 the outer and inner modes (here MODE and OP_MODE respectively)
600 are scalar integers, and only then when the subreg is used as
601 an rvalue.
603 It is only valid to form such truncating subregs if the
604 truncation requires no action by the target. The onus for
605 proving this is on the creator of the subreg -- e.g. the
606 caller to simplify_subreg or simplify_gen_subreg -- and typically
607 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
609 2. a TRUNCATE. This form handles both scalar and compound integers.
611 The first form is preferred where valid. However, the TRUNCATE
612 handling in simplify_unary_operation turns the second form into the
613 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
614 so it is generally safe to form rvalue truncations using:
616 simplify_gen_unary (TRUNCATE, ...)
618 and leave simplify_unary_operation to work out which representation
619 should be used.
621 Because of the proof requirements on (1), simplify_truncation must
622 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
623 regardless of whether the outer truncation came from a SUBREG or a
624 TRUNCATE. For example, if the caller has proven that an SImode
625 truncation of:
627 (and:DI X Y)
629 is a no-op and can be represented as a subreg, it does not follow
630 that SImode truncations of X and Y are also no-ops. On a target
631 like 64-bit MIPS that requires SImode values to be stored in
632 sign-extended form, an SImode truncation of:
634 (and:DI (reg:DI X) (const_int 63))
636 is trivially a no-op because only the lower 6 bits can be set.
637 However, X is still an arbitrary 64-bit number and so we cannot
638 assume that truncating it too is a no-op. */
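/* As a concrete example of the two forms, an SImode rvalue truncation of
   (reg:DI 100) (an arbitrary register) can be written either as
   (subreg:SI (reg:DI 100) 0) -- on a little-endian target, and only when
   the target needs no explicit truncation instruction -- or as
   (truncate:SI (reg:DI 100)); simplify_unary_operation rewrites the latter
   into the former when that is known to be safe.  */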
640 static rtx
641 simplify_truncation (machine_mode mode, rtx op,
642 machine_mode op_mode)
644 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
645 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
646 scalar_int_mode int_mode, int_op_mode, subreg_mode;
648 gcc_assert (precision <= op_precision);
650 /* Optimize truncations of zero and sign extended values. */
651 if (GET_CODE (op) == ZERO_EXTEND
652 || GET_CODE (op) == SIGN_EXTEND)
654 /* There are three possibilities. If MODE is the same as the
655 origmode, we can omit both the extension and the subreg.
656 If MODE is not larger than the origmode, we can apply the
657 truncation without the extension. Finally, if the outermode
658 is larger than the origmode, we can just extend to the appropriate
659 mode. */
660 machine_mode origmode = GET_MODE (XEXP (op, 0));
661 if (mode == origmode)
662 return XEXP (op, 0);
663 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
664 return simplify_gen_unary (TRUNCATE, mode,
665 XEXP (op, 0), origmode);
666 else
667 return simplify_gen_unary (GET_CODE (op), mode,
668 XEXP (op, 0), origmode);
671 /* If the machine can perform operations in the truncated mode, distribute
672 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
673 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
674 if (1
675 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
676 && (GET_CODE (op) == PLUS
677 || GET_CODE (op) == MINUS
678 || GET_CODE (op) == MULT))
680 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
681 if (op0)
683 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
684 if (op1)
685 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
689 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
690 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
691 the outer subreg is effectively a truncation to the original mode. */
692 if ((GET_CODE (op) == LSHIFTRT
693 || GET_CODE (op) == ASHIFTRT)
694 /* Ensure that OP_MODE is at least twice as wide as MODE
695 to avoid the possibility that an outer LSHIFTRT shifts by more
696 than the sign extension's sign_bit_copies and introduces zeros
697 into the high bits of the result. */
698 && 2 * precision <= op_precision
699 && CONST_INT_P (XEXP (op, 1))
700 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
701 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702 && UINTVAL (XEXP (op, 1)) < precision)
703 return simplify_gen_binary (ASHIFTRT, mode,
704 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
706 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
707 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if ((GET_CODE (op) == LSHIFTRT
710 || GET_CODE (op) == ASHIFTRT)
711 && CONST_INT_P (XEXP (op, 1))
712 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714 && UINTVAL (XEXP (op, 1)) < precision)
715 return simplify_gen_binary (LSHIFTRT, mode,
716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
718 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
719 (ashift:QI (x:QI) C), where C is a suitable small constant and
720 the outer subreg is effectively a truncation to the original mode. */
721 if (GET_CODE (op) == ASHIFT
722 && CONST_INT_P (XEXP (op, 1))
723 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
724 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
725 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
726 && UINTVAL (XEXP (op, 1)) < precision)
727 return simplify_gen_binary (ASHIFT, mode,
728 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
730 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
731 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
732 and C2. */
733 if (GET_CODE (op) == AND
734 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
735 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
736 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
737 && CONST_INT_P (XEXP (op, 1)))
739 rtx op0 = (XEXP (XEXP (op, 0), 0));
740 rtx shift_op = XEXP (XEXP (op, 0), 1);
741 rtx mask_op = XEXP (op, 1);
742 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
743 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
745 if (shift < precision
746 /* If doing this transform works for an X with all bits set,
747 it works for any X. */
748 && ((GET_MODE_MASK (mode) >> shift) & mask)
749 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
750 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
751 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
753 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
754 return simplify_gen_binary (AND, mode, op0, mask_op);
758 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
759 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
760 changing len. */
761 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
762 && REG_P (XEXP (op, 0))
763 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
764 && CONST_INT_P (XEXP (op, 1))
765 && CONST_INT_P (XEXP (op, 2)))
767 rtx op0 = XEXP (op, 0);
768 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
769 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
770 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
772 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
773 if (op0)
775 pos -= op_precision - precision;
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), GEN_INT (pos));
780 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
782 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
783 if (op0)
784 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
785 XEXP (op, 1), XEXP (op, 2));
789 /* Recognize a word extraction from a multi-word subreg. */
790 if ((GET_CODE (op) == LSHIFTRT
791 || GET_CODE (op) == ASHIFTRT)
792 && SCALAR_INT_MODE_P (mode)
793 && SCALAR_INT_MODE_P (op_mode)
794 && precision >= BITS_PER_WORD
795 && 2 * precision <= op_precision
796 && CONST_INT_P (XEXP (op, 1))
797 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
798 && UINTVAL (XEXP (op, 1)) < op_precision)
800 int byte = subreg_lowpart_offset (mode, op_mode);
801 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
802 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
803 (WORDS_BIG_ENDIAN
804 ? byte - shifted_bytes
805 : byte + shifted_bytes));
808 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
809 and try replacing the TRUNCATE and shift with it. Don't do this
810 if the MEM has a mode-dependent address. */
811 if ((GET_CODE (op) == LSHIFTRT
812 || GET_CODE (op) == ASHIFTRT)
813 && is_a <scalar_int_mode> (mode, &int_mode)
814 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
815 && MEM_P (XEXP (op, 0))
816 && CONST_INT_P (XEXP (op, 1))
817 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
818 && INTVAL (XEXP (op, 1)) > 0
819 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
820 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
821 MEM_ADDR_SPACE (XEXP (op, 0)))
822 && ! MEM_VOLATILE_P (XEXP (op, 0))
823 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
824 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
826 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
827 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
828 return adjust_address_nv (XEXP (op, 0), int_mode,
829 (WORDS_BIG_ENDIAN
830 ? byte - shifted_bytes
831 : byte + shifted_bytes));
834 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
835 (OP:SI foo:SI) if OP is NEG or ABS. */
836 if ((GET_CODE (op) == ABS
837 || GET_CODE (op) == NEG)
838 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
839 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
840 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
841 return simplify_gen_unary (GET_CODE (op), mode,
842 XEXP (XEXP (op, 0), 0), mode);
844 /* (truncate:A (subreg:B (truncate:C X) 0)) is
845 (truncate:A X). */
846 if (GET_CODE (op) == SUBREG
847 && is_a <scalar_int_mode> (mode, &int_mode)
848 && SCALAR_INT_MODE_P (op_mode)
849 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
850 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
851 && subreg_lowpart_p (op))
853 rtx inner = XEXP (SUBREG_REG (op), 0);
854 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
855 return simplify_gen_unary (TRUNCATE, int_mode, inner,
856 GET_MODE (inner));
857 else
858 /* If subreg above is paradoxical and C is narrower
859 than A, return (subreg:A (truncate:C X) 0). */
860 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
863 /* (truncate:A (truncate:B X)) is (truncate:A X). */
864 if (GET_CODE (op) == TRUNCATE)
865 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
866 GET_MODE (XEXP (op, 0)));
868 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
869 in mode A. */
870 if (GET_CODE (op) == IOR
871 && SCALAR_INT_MODE_P (mode)
872 && SCALAR_INT_MODE_P (op_mode)
873 && CONST_INT_P (XEXP (op, 1))
874 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
875 return constm1_rtx;
877 return NULL_RTX;
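/* For example, when the target can perform operations directly in QImode,
   the distribution rule above rewrites

     (truncate:QI (plus:SI (reg:SI 100) (reg:SI 101)))

   as the QImode addition of the truncated (or lowpart-subreg'd) operands;
   register numbers here are arbitrary.  */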
880 /* Try to simplify a unary operation CODE whose output mode is to be
881 MODE with input operand OP whose mode was originally OP_MODE.
882 Return zero if no simplification can be made. */
rtx
884 simplify_unary_operation (enum rtx_code code, machine_mode mode,
885 rtx op, machine_mode op_mode)
887 rtx trueop, tem;
889 trueop = avoid_constant_pool_reference (op);
891 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
892 if (tem)
893 return tem;
895 return simplify_unary_operation_1 (code, mode, op);
898 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
899 to be exact. */
901 static bool
902 exact_int_to_float_conversion_p (const_rtx op)
904 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
905 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
906 /* Constants shouldn't reach here. */
907 gcc_assert (op0_mode != VOIDmode);
908 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
909 int in_bits = in_prec;
910 if (HWI_COMPUTABLE_MODE_P (op0_mode))
912 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
913 if (GET_CODE (op) == FLOAT)
914 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
915 else if (GET_CODE (op) == UNSIGNED_FLOAT)
916 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
917 else
918 gcc_unreachable ();
919 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
921 return in_bits <= out_bits;
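/* For example, (float:DF (reg:SI R)) is always exact, since DFmode has a
   53-bit significand and an SImode value needs at most 32 bits, whereas
   (float:SF (reg:SI R)) is exact only if nonzero_bits or
   num_sign_bit_copies proves that the value fits in SFmode's 24
   significand bits.  */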
924 /* Perform some simplifications we can do even if the operands
925 aren't constant. */
926 static rtx
927 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
929 enum rtx_code reversed;
930 rtx temp, elt, base, step;
931 scalar_int_mode inner, int_mode, op_mode, op0_mode;
933 switch (code)
935 case NOT:
936 /* (not (not X)) == X. */
937 if (GET_CODE (op) == NOT)
938 return XEXP (op, 0);
940 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
941 comparison is all ones. */
942 if (COMPARISON_P (op)
943 && (mode == BImode || STORE_FLAG_VALUE == -1)
944 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
945 return simplify_gen_relational (reversed, mode, VOIDmode,
946 XEXP (op, 0), XEXP (op, 1));
948 /* (not (plus X -1)) can become (neg X). */
949 if (GET_CODE (op) == PLUS
950 && XEXP (op, 1) == constm1_rtx)
951 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
953 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
954 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
955 and MODE_VECTOR_INT. */
956 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
957 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
958 CONSTM1_RTX (mode));
960 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
961 if (GET_CODE (op) == XOR
962 && CONST_INT_P (XEXP (op, 1))
963 && (temp = simplify_unary_operation (NOT, mode,
964 XEXP (op, 1), mode)) != 0)
965 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
967 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
968 if (GET_CODE (op) == PLUS
969 && CONST_INT_P (XEXP (op, 1))
970 && mode_signbit_p (mode, XEXP (op, 1))
971 && (temp = simplify_unary_operation (NOT, mode,
972 XEXP (op, 1), mode)) != 0)
973 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
976 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
977 operands other than 1, but that is not valid. We could do a
978 similar simplification for (not (lshiftrt C X)) where C is
979 just the sign bit, but this doesn't seem common enough to
980 bother with. */
981 if (GET_CODE (op) == ASHIFT
982 && XEXP (op, 0) == const1_rtx)
984 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
985 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
988 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
989 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
990 so we can perform the above simplification. */
991 if (STORE_FLAG_VALUE == -1
992 && is_a <scalar_int_mode> (mode, &int_mode)
993 && GET_CODE (op) == ASHIFTRT
994 && CONST_INT_P (XEXP (op, 1))
995 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
996 return simplify_gen_relational (GE, int_mode, VOIDmode,
997 XEXP (op, 0), const0_rtx);
1000 if (partial_subreg_p (op)
1001 && subreg_lowpart_p (op)
1002 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1003 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1005 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1006 rtx x;
1008 x = gen_rtx_ROTATE (inner_mode,
1009 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1010 inner_mode),
1011 XEXP (SUBREG_REG (op), 1));
1012 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1013 if (temp)
1014 return temp;
1017 /* Apply De Morgan's laws to reduce number of patterns for machines
1018 with negating logical insns (and-not, nand, etc.). If result has
1019 only one NOT, put it first, since that is how the patterns are
1020 coded. */
1021 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1023 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1024 machine_mode op_mode;
1026 op_mode = GET_MODE (in1);
1027 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1029 op_mode = GET_MODE (in2);
1030 if (op_mode == VOIDmode)
1031 op_mode = mode;
1032 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1034 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1035 std::swap (in1, in2);
1037 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1038 mode, in1, in2);
1041 /* (not (bswap x)) -> (bswap (not x)). */
1042 if (GET_CODE (op) == BSWAP)
1044 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1045 return simplify_gen_unary (BSWAP, mode, x, mode);
1047 break;
1049 case NEG:
1050 /* (neg (neg X)) == X. */
1051 if (GET_CODE (op) == NEG)
1052 return XEXP (op, 0);
1054 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1055 If comparison is not reversible use
1056 x ? y : (neg y). */
1057 if (GET_CODE (op) == IF_THEN_ELSE)
1059 rtx cond = XEXP (op, 0);
1060 rtx true_rtx = XEXP (op, 1);
1061 rtx false_rtx = XEXP (op, 2);
1063 if ((GET_CODE (true_rtx) == NEG
1064 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1065 || (GET_CODE (false_rtx) == NEG
1066 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1068 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1069 temp = reversed_comparison (cond, mode);
1070 else
1072 temp = cond;
1073 std::swap (true_rtx, false_rtx);
1075 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1076 mode, temp, true_rtx, false_rtx);
1080 /* (neg (plus X 1)) can become (not X). */
1081 if (GET_CODE (op) == PLUS
1082 && XEXP (op, 1) == const1_rtx)
1083 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1085 /* Similarly, (neg (not X)) is (plus X 1). */
1086 if (GET_CODE (op) == NOT)
1087 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1088 CONST1_RTX (mode));
1090 /* (neg (minus X Y)) can become (minus Y X). This transformation
1091 isn't safe for modes with signed zeros, since if X and Y are
1092 both +0, (minus Y X) is the same as (minus X Y). If the
1093 rounding mode is towards +infinity (or -infinity) then the two
1094 expressions will be rounded differently. */
1095 if (GET_CODE (op) == MINUS
1096 && !HONOR_SIGNED_ZEROS (mode)
1097 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1098 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1100 if (GET_CODE (op) == PLUS
1101 && !HONOR_SIGNED_ZEROS (mode)
1102 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 /* (neg (plus A C)) is simplified to (minus -C A). */
1105 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1106 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1108 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1109 if (temp)
1110 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1113 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1114 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1115 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1118 /* (neg (mult A B)) becomes (mult A (neg B)).
1119 This works even for floating-point values. */
1120 if (GET_CODE (op) == MULT
1121 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1123 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1124 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1127 /* NEG commutes with ASHIFT since it is multiplication. Only do
1128 this if we can then eliminate the NEG (e.g., if the operand
1129 is a constant). */
1130 if (GET_CODE (op) == ASHIFT)
1132 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1133 if (temp)
1134 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1137 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1138 C is equal to the width of MODE minus 1. */
1139 if (GET_CODE (op) == ASHIFTRT
1140 && CONST_INT_P (XEXP (op, 1))
1141 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1142 return simplify_gen_binary (LSHIFTRT, mode,
1143 XEXP (op, 0), XEXP (op, 1));
1145 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1146 C is equal to the width of MODE minus 1. */
1147 if (GET_CODE (op) == LSHIFTRT
1148 && CONST_INT_P (XEXP (op, 1))
1149 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1150 return simplify_gen_binary (ASHIFTRT, mode,
1151 XEXP (op, 0), XEXP (op, 1));
1153 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1154 if (GET_CODE (op) == XOR
1155 && XEXP (op, 1) == const1_rtx
1156 && nonzero_bits (XEXP (op, 0), mode) == 1)
1157 return plus_constant (mode, XEXP (op, 0), -1);
1159 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1160 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1161 if (GET_CODE (op) == LT
1162 && XEXP (op, 1) == const0_rtx
1163 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1165 int_mode = as_a <scalar_int_mode> (mode);
1166 int isize = GET_MODE_PRECISION (inner);
1167 if (STORE_FLAG_VALUE == 1)
1169 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1170 GEN_INT (isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1177 else if (STORE_FLAG_VALUE == -1)
1179 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1180 GEN_INT (isize - 1));
1181 if (int_mode == inner)
1182 return temp;
1183 if (GET_MODE_PRECISION (int_mode) > isize)
1184 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1185 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1189 if (vec_series_p (op, &base, &step))
1191 /* Only create a new series if we can simplify both parts. In other
1192 cases this isn't really a simplification, and it's not necessarily
1193 a win to replace a vector operation with a scalar operation. */
1194 scalar_mode inner_mode = GET_MODE_INNER (mode);
1195 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1196 if (base)
1198 step = simplify_unary_operation (NEG, inner_mode,
1199 step, inner_mode);
1200 if (step)
1201 return gen_vec_series (mode, base, step);
1204 break;
1206 case TRUNCATE:
1207 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1208 with the umulXi3_highpart patterns. */
1209 if (GET_CODE (op) == LSHIFTRT
1210 && GET_CODE (XEXP (op, 0)) == MULT)
1211 break;
1213 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1215 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1217 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1218 if (temp)
1219 return temp;
1221 /* We can't handle truncation to a partial integer mode here
1222 because we don't know the real bitsize of the partial
1223 integer mode. */
1224 break;
1227 if (GET_MODE (op) != VOIDmode)
1229 temp = simplify_truncation (mode, op, GET_MODE (op));
1230 if (temp)
1231 return temp;
1234 /* If we know that the value is already truncated, we can
1235 replace the TRUNCATE with a SUBREG. */
1236 if (GET_MODE_NUNITS (mode) == 1
1237 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1238 || truncated_to_mode (mode, op)))
1240 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1241 if (temp)
1242 return temp;
1245 /* A truncate of a comparison can be replaced with a subreg if
1246 STORE_FLAG_VALUE permits. This is like the previous test,
1247 but it works even if the comparison is done in a mode larger
1248 than HOST_BITS_PER_WIDE_INT. */
1249 if (HWI_COMPUTABLE_MODE_P (mode)
1250 && COMPARISON_P (op)
1251 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1253 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1254 if (temp)
1255 return temp;
1258 /* A truncate of a memory is just loading the low part of the memory
1259 if we are not changing the meaning of the address. */
1260 if (GET_CODE (op) == MEM
1261 && !VECTOR_MODE_P (mode)
1262 && !MEM_VOLATILE_P (op)
1263 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1265 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1266 if (temp)
1267 return temp;
1270 break;
1272 case FLOAT_TRUNCATE:
1273 if (DECIMAL_FLOAT_MODE_P (mode))
1274 break;
1276 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1277 if (GET_CODE (op) == FLOAT_EXTEND
1278 && GET_MODE (XEXP (op, 0)) == mode)
1279 return XEXP (op, 0);
1281 /* (float_truncate:SF (float_truncate:DF foo:XF))
1282 = (float_truncate:SF foo:XF).
1283 This may eliminate double rounding, so it is unsafe.
1285 (float_truncate:SF (float_extend:XF foo:DF))
1286 = (float_truncate:SF foo:DF).
1288 (float_truncate:DF (float_extend:XF foo:SF))
1289 = (float_extend:DF foo:SF). */
1290 if ((GET_CODE (op) == FLOAT_TRUNCATE
1291 && flag_unsafe_math_optimizations)
1292 || GET_CODE (op) == FLOAT_EXTEND)
1293 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1294 > GET_MODE_UNIT_SIZE (mode)
1295 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1296 mode,
1297 XEXP (op, 0), mode);
1299 /* (float_truncate (float x)) is (float x) */
1300 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1301 && (flag_unsafe_math_optimizations
1302 || exact_int_to_float_conversion_p (op)))
1303 return simplify_gen_unary (GET_CODE (op), mode,
1304 XEXP (op, 0),
1305 GET_MODE (XEXP (op, 0)));
1307 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1308 (OP:SF foo:SF) if OP is NEG or ABS. */
1309 if ((GET_CODE (op) == ABS
1310 || GET_CODE (op) == NEG)
1311 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1312 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1313 return simplify_gen_unary (GET_CODE (op), mode,
1314 XEXP (XEXP (op, 0), 0), mode);
1316 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1317 is (float_truncate:SF x). */
1318 if (GET_CODE (op) == SUBREG
1319 && subreg_lowpart_p (op)
1320 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1321 return SUBREG_REG (op);
1322 break;
1324 case FLOAT_EXTEND:
1325 if (DECIMAL_FLOAT_MODE_P (mode))
1326 break;
1328 /* (float_extend (float_extend x)) is (float_extend x)
1330 (float_extend (float x)) is (float x) assuming that double
1331 rounding can't happen.  */
1333 if (GET_CODE (op) == FLOAT_EXTEND
1334 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1335 && exact_int_to_float_conversion_p (op)))
1336 return simplify_gen_unary (GET_CODE (op), mode,
1337 XEXP (op, 0),
1338 GET_MODE (XEXP (op, 0)));
1340 break;
1342 case ABS:
1343 /* (abs (neg <foo>)) -> (abs <foo>) */
1344 if (GET_CODE (op) == NEG)
1345 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1346 GET_MODE (XEXP (op, 0)));
1348 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1349 do nothing. */
1350 if (GET_MODE (op) == VOIDmode)
1351 break;
1353 /* If operand is something known to be positive, ignore the ABS. */
1354 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1355 || val_signbit_known_clear_p (GET_MODE (op),
1356 nonzero_bits (op, GET_MODE (op))))
1357 return op;
1359 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1360 if (is_a <scalar_int_mode> (mode, &int_mode)
1361 && (num_sign_bit_copies (op, int_mode)
1362 == GET_MODE_PRECISION (int_mode)))
1363 return gen_rtx_NEG (int_mode, op);
1365 break;
1367 case FFS:
1368 /* (ffs (*_extend <X>)) = (ffs <X>) */
1369 if (GET_CODE (op) == SIGN_EXTEND
1370 || GET_CODE (op) == ZERO_EXTEND)
1371 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1373 break;
1375 case POPCOUNT:
1376 switch (GET_CODE (op))
1378 case BSWAP:
1379 case ZERO_EXTEND:
1380 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1381 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1382 GET_MODE (XEXP (op, 0)));
1384 case ROTATE:
1385 case ROTATERT:
1386 /* Rotations don't affect popcount. */
1387 if (!side_effects_p (XEXP (op, 1)))
1388 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1389 GET_MODE (XEXP (op, 0)));
1390 break;
1392 default:
1393 break;
1395 break;
1397 case PARITY:
1398 switch (GET_CODE (op))
1400 case NOT:
1401 case BSWAP:
1402 case ZERO_EXTEND:
1403 case SIGN_EXTEND:
1404 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1405 GET_MODE (XEXP (op, 0)));
1407 case ROTATE:
1408 case ROTATERT:
1409 /* Rotations don't affect parity. */
1410 if (!side_effects_p (XEXP (op, 1)))
1411 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1412 GET_MODE (XEXP (op, 0)));
1413 break;
1415 default:
1416 break;
1418 break;
1420 case BSWAP:
1421 /* (bswap (bswap x)) -> x. */
1422 if (GET_CODE (op) == BSWAP)
1423 return XEXP (op, 0);
1424 break;
1426 case FLOAT:
1427 /* (float (sign_extend <X>)) = (float <X>). */
1428 if (GET_CODE (op) == SIGN_EXTEND)
1429 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1430 GET_MODE (XEXP (op, 0)));
1431 break;
1433 case SIGN_EXTEND:
1434 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1435 becomes just the MINUS if its mode is MODE. This allows
1436 folding switch statements on machines using casesi (such as
1437 the VAX). */
1438 if (GET_CODE (op) == TRUNCATE
1439 && GET_MODE (XEXP (op, 0)) == mode
1440 && GET_CODE (XEXP (op, 0)) == MINUS
1441 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1442 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1443 return XEXP (op, 0);
1445 /* Extending a widening multiplication should be canonicalized to
1446 a wider widening multiplication. */
1447 if (GET_CODE (op) == MULT)
1449 rtx lhs = XEXP (op, 0);
1450 rtx rhs = XEXP (op, 1);
1451 enum rtx_code lcode = GET_CODE (lhs);
1452 enum rtx_code rcode = GET_CODE (rhs);
1454 /* Widening multiplies usually extend both operands, but sometimes
1455 they use a shift to extract a portion of a register. */
1456 if ((lcode == SIGN_EXTEND
1457 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1458 && (rcode == SIGN_EXTEND
1459 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1461 machine_mode lmode = GET_MODE (lhs);
1462 machine_mode rmode = GET_MODE (rhs);
1463 int bits;
1465 if (lcode == ASHIFTRT)
1466 /* Number of bits not shifted off the end. */
1467 bits = (GET_MODE_UNIT_PRECISION (lmode)
1468 - INTVAL (XEXP (lhs, 1)));
1469 else /* lcode == SIGN_EXTEND */
1470 /* Size of inner mode. */
1471 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1473 if (rcode == ASHIFTRT)
1474 bits += (GET_MODE_UNIT_PRECISION (rmode)
1475 - INTVAL (XEXP (rhs, 1)));
1476 else /* rcode == SIGN_EXTEND */
1477 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1479 /* We can only widen multiplies if the result is mathematically
1480 equivalent. I.e. if overflow was impossible. */
1481 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1482 return simplify_gen_binary
1483 (MULT, mode,
1484 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1485 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1489 /* Check for a sign extension of a subreg of a promoted
1490 variable, where the promotion is sign-extended, and the
1491 target mode is the same as the variable's promotion. */
1492 if (GET_CODE (op) == SUBREG
1493 && SUBREG_PROMOTED_VAR_P (op)
1494 && SUBREG_PROMOTED_SIGNED_P (op)
1495 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1497 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1498 if (temp)
1499 return temp;
1502 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1503 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1504 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1506 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1507 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1508 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1509 GET_MODE (XEXP (op, 0)));
1512 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1513 is (sign_extend:M (subreg:O <X>)) if there is mode with
1514 GET_MODE_BITSIZE (N) - I bits.
1515 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1516 is similarly (zero_extend:M (subreg:O <X>)). */
1517 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1518 && GET_CODE (XEXP (op, 0)) == ASHIFT
1519 && is_a <scalar_int_mode> (mode, &int_mode)
1520 && CONST_INT_P (XEXP (op, 1))
1521 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1522 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1523 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1525 scalar_int_mode tmode;
1526 gcc_assert (GET_MODE_BITSIZE (int_mode)
1527 > GET_MODE_BITSIZE (op_mode));
1528 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1529 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1531 rtx inner =
1532 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1533 if (inner)
1534 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1535 ? SIGN_EXTEND : ZERO_EXTEND,
1536 int_mode, inner, tmode);
1540 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1541 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1542 if (GET_CODE (op) == LSHIFTRT
1543 && CONST_INT_P (XEXP (op, 1))
1544 && XEXP (op, 1) != const0_rtx)
1545 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1547 #if defined(POINTERS_EXTEND_UNSIGNED)
1548 /* As we do not know which address space the pointer is referring to,
1549 we can do this only if the target does not support different pointer
1550 or address modes depending on the address space. */
1551 if (target_default_pointer_address_modes_p ()
1552 && ! POINTERS_EXTEND_UNSIGNED
1553 && mode == Pmode && GET_MODE (op) == ptr_mode
1554 && (CONSTANT_P (op)
1555 || (GET_CODE (op) == SUBREG
1556 && REG_P (SUBREG_REG (op))
1557 && REG_POINTER (SUBREG_REG (op))
1558 && GET_MODE (SUBREG_REG (op)) == Pmode))
1559 && !targetm.have_ptr_extend ())
1561 temp
1562 = convert_memory_address_addr_space_1 (Pmode, op,
1563 ADDR_SPACE_GENERIC, false,
1564 true);
1565 if (temp)
1566 return temp;
1568 #endif
1569 break;
1571 case ZERO_EXTEND:
1572 /* Check for a zero extension of a subreg of a promoted
1573 variable, where the promotion is zero-extended, and the
1574 target mode is the same as the variable's promotion. */
1575 if (GET_CODE (op) == SUBREG
1576 && SUBREG_PROMOTED_VAR_P (op)
1577 && SUBREG_PROMOTED_UNSIGNED_P (op)
1578 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1580 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1581 if (temp)
1582 return temp;
1585 /* Extending a widening multiplication should be canonicalized to
1586 a wider widening multiplication. */
1587 if (GET_CODE (op) == MULT)
1589 rtx lhs = XEXP (op, 0);
1590 rtx rhs = XEXP (op, 1);
1591 enum rtx_code lcode = GET_CODE (lhs);
1592 enum rtx_code rcode = GET_CODE (rhs);
1594 /* Widening multiplies usually extend both operands, but sometimes
1595 they use a shift to extract a portion of a register. */
1596 if ((lcode == ZERO_EXTEND
1597 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1598 && (rcode == ZERO_EXTEND
1599 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1601 machine_mode lmode = GET_MODE (lhs);
1602 machine_mode rmode = GET_MODE (rhs);
1603 int bits;
1605 if (lcode == LSHIFTRT)
1606 /* Number of bits not shifted off the end. */
1607 bits = (GET_MODE_UNIT_PRECISION (lmode)
1608 - INTVAL (XEXP (lhs, 1)));
1609 else /* lcode == ZERO_EXTEND */
1610 /* Size of inner mode. */
1611 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1613 if (rcode == LSHIFTRT)
1614 bits += (GET_MODE_UNIT_PRECISION (rmode)
1615 - INTVAL (XEXP (rhs, 1)));
1616 else /* rcode == ZERO_EXTEND */
1617 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1619 /* We can only widen multiplies if the result is mathematically
1620 equivalent. I.e. if overflow was impossible. */
1621 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1622 return simplify_gen_binary
1623 (MULT, mode,
1624 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1625 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1629 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1630 if (GET_CODE (op) == ZERO_EXTEND)
1631 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1632 GET_MODE (XEXP (op, 0)));
1634 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1635 is (zero_extend:M (subreg:O <X>)) if there is mode with
1636 GET_MODE_PRECISION (N) - I bits. */
1637 if (GET_CODE (op) == LSHIFTRT
1638 && GET_CODE (XEXP (op, 0)) == ASHIFT
1639 && is_a <scalar_int_mode> (mode, &int_mode)
1640 && CONST_INT_P (XEXP (op, 1))
1641 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1642 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1643 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1645 scalar_int_mode tmode;
1646 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1647 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1649 rtx inner =
1650 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1651 if (inner)
1652 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1653 inner, tmode);
1657 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1658 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1659 of mode N. E.g.
1660 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1661 (and:SI (reg:SI) (const_int 63)). */
1662 if (partial_subreg_p (op)
1663 && is_a <scalar_int_mode> (mode, &int_mode)
1664 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1665 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1666 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1667 && subreg_lowpart_p (op)
1668 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1669 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1671 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1672 return SUBREG_REG (op);
1673 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1674 op0_mode);
1677 #if defined(POINTERS_EXTEND_UNSIGNED)
1678 /* As we do not know which address space the pointer is referring to,
1679 we can do this only if the target does not support different pointer
1680 or address modes depending on the address space. */
1681 if (target_default_pointer_address_modes_p ()
1682 && POINTERS_EXTEND_UNSIGNED > 0
1683 && mode == Pmode && GET_MODE (op) == ptr_mode
1684 && (CONSTANT_P (op)
1685 || (GET_CODE (op) == SUBREG
1686 && REG_P (SUBREG_REG (op))
1687 && REG_POINTER (SUBREG_REG (op))
1688 && GET_MODE (SUBREG_REG (op)) == Pmode))
1689 && !targetm.have_ptr_extend ())
1691 temp
1692 = convert_memory_address_addr_space_1 (Pmode, op,
1693 ADDR_SPACE_GENERIC, false,
1694 true);
1695 if (temp)
1696 return temp;
1698 #endif
1699 break;
1701 default:
1702 break;
1705 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1707 /* Try applying the operator to ELT and see if that simplifies.
1708 We can duplicate the result if so.
1710 The reason we don't use simplify_gen_unary is that it isn't
1711 necessarily a win to convert things like:
1713 (neg:V (vec_duplicate:V (reg:S R)))
1717 (vec_duplicate:V (neg:S (reg:S R)))
1719 The first might be done entirely in vector registers while the
1720 second might need a move between register files. */
1721 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1722 elt, GET_MODE_INNER (GET_MODE (op)));
1723 if (temp)
1724 return gen_vec_duplicate (mode, temp);
1727 return 0;
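/* For example, the cases above rewrite

     (not:SI (not:SI (reg:SI 100)))                ==> (reg:SI 100)
     (neg:SI (plus:SI (reg:SI 100) (const_int 1))) ==> (not:SI (reg:SI 100))

   (register numbers are arbitrary).  */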
1730 /* Try to compute the value of a unary operation CODE whose output mode is to
1731 be MODE with input operand OP whose mode was originally OP_MODE.
1732 Return zero if the value cannot be computed. */
rtx
1734 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1735 rtx op, machine_mode op_mode)
1737 scalar_int_mode result_mode;
1739 if (code == VEC_DUPLICATE)
1741 gcc_assert (VECTOR_MODE_P (mode));
1742 if (GET_MODE (op) != VOIDmode)
1744 if (!VECTOR_MODE_P (GET_MODE (op)))
1745 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1746 else
1747 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1748 (GET_MODE (op)));
1750 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1751 return gen_const_vec_duplicate (mode, op);
1752 if (GET_CODE (op) == CONST_VECTOR)
1754 int elt_size = GET_MODE_UNIT_SIZE (mode);
1755 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1756 rtvec v = rtvec_alloc (n_elts);
1757 unsigned int i;
1759 machine_mode inmode = GET_MODE (op);
1760 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1761 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1763 gcc_assert (in_n_elts < n_elts);
1764 gcc_assert ((n_elts % in_n_elts) == 0);
1765 for (i = 0; i < n_elts; i++)
1766 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1767 return gen_rtx_CONST_VECTOR (mode, v);
1771 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1773 int elt_size = GET_MODE_UNIT_SIZE (mode);
1774 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1775 machine_mode opmode = GET_MODE (op);
1776 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1777 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1778 rtvec v = rtvec_alloc (n_elts);
1779 unsigned int i;
1781 gcc_assert (op_n_elts == n_elts);
1782 for (i = 0; i < n_elts; i++)
1784 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1785 CONST_VECTOR_ELT (op, i),
1786 GET_MODE_INNER (opmode));
1787 if (!x)
1788 return 0;
1789 RTVEC_ELT (v, i) = x;
1791 return gen_rtx_CONST_VECTOR (mode, v);
1794 /* The order of these tests is critical so that, for example, we don't
1795 check the wrong mode (input vs. output) for a conversion operation,
1796 such as FIX. At some point, this should be simplified. */
1798 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1800 REAL_VALUE_TYPE d;
1802 if (op_mode == VOIDmode)
1804 /* CONST_INTs have VOIDmode as their mode. We assume that all
1805 the bits of the constant are significant, though this is
1806 a dangerous assumption: CONST_INTs are often
1807 created and used with garbage in the bits outside of the
1808 precision of the implied mode of the const_int. */
1809 op_mode = MAX_MODE_INT;
1812 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1814 /* Avoid the folding if flag_signaling_nans is on and
1815 operand is a signaling NaN. */
1816 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1817 return 0;
1819 d = real_value_truncate (mode, d);
1820 return const_double_from_real_value (d, mode);
1822 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1824 REAL_VALUE_TYPE d;
1826 if (op_mode == VOIDmode)
1828 /* CONST_INTs have VOIDmode as their mode. We assume that all
1829 the bits of the constant are significant, though this is
1830 a dangerous assumption: CONST_INTs are often
1831 created and used with garbage in the bits outside of the
1832 precision of the implied mode of the const_int. */
1833 op_mode = MAX_MODE_INT;
1836 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1838 /* Avoid the folding if flag_signaling_nans is on and
1839 operand is a signaling NaN. */
1840 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1841 return 0;
1843 d = real_value_truncate (mode, d);
1844 return const_double_from_real_value (d, mode);
1847 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1849 unsigned int width = GET_MODE_PRECISION (result_mode);
1850 wide_int result;
1851 scalar_int_mode imode = (op_mode == VOIDmode
1852 ? result_mode
1853 : as_a <scalar_int_mode> (op_mode));
1854 rtx_mode_t op0 = rtx_mode_t (op, imode);
1855 int int_value;
1857 #if TARGET_SUPPORTS_WIDE_INT == 0
1858 /* This assert keeps the simplification from producing a result
1859 that cannot be represented in a CONST_DOUBLE. A lot of
1860 upstream callers expect that this function never fails to
1861 simplify something, so if this check were added to the test
1862 above, the code would just die later anyway. If this assert
1863 fires, you need to make the port support wide int. */
1864 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1865 #endif
1867 switch (code)
1869 case NOT:
1870 result = wi::bit_not (op0);
1871 break;
1873 case NEG:
1874 result = wi::neg (op0);
1875 break;
1877 case ABS:
1878 result = wi::abs (op0);
1879 break;
1881 case FFS:
1882 result = wi::shwi (wi::ffs (op0), result_mode);
1883 break;
1885 case CLZ:
1886 if (wi::ne_p (op0, 0))
1887 int_value = wi::clz (op0);
1888 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1889 int_value = GET_MODE_PRECISION (imode);
1890 result = wi::shwi (int_value, result_mode);
1891 break;
1893 case CLRSB:
1894 result = wi::shwi (wi::clrsb (op0), result_mode);
1895 break;
1897 case CTZ:
1898 if (wi::ne_p (op0, 0))
1899 int_value = wi::ctz (op0);
1900 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1901 int_value = GET_MODE_PRECISION (imode);
1902 result = wi::shwi (int_value, result_mode);
1903 break;
1905 case POPCOUNT:
1906 result = wi::shwi (wi::popcount (op0), result_mode);
1907 break;
1909 case PARITY:
1910 result = wi::shwi (wi::parity (op0), result_mode);
1911 break;
1913 case BSWAP:
1914 result = wide_int (op0).bswap ();
1915 break;
1917 case TRUNCATE:
1918 case ZERO_EXTEND:
1919 result = wide_int::from (op0, width, UNSIGNED);
1920 break;
1922 case SIGN_EXTEND:
1923 result = wide_int::from (op0, width, SIGNED);
1924 break;
1926 case SQRT:
1927 default:
1928 return 0;
1931 return immed_wide_int_const (result, result_mode);
1934 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1935 && SCALAR_FLOAT_MODE_P (mode)
1936 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1938 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1939 switch (code)
1941 case SQRT:
1942 return 0;
1943 case ABS:
1944 d = real_value_abs (&d);
1945 break;
1946 case NEG:
1947 d = real_value_negate (&d);
1948 break;
1949 case FLOAT_TRUNCATE:
1950 /* Don't perform the operation if flag_signaling_nans is on
1951 and the operand is a signaling NaN. */
1952 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1953 return NULL_RTX;
1954 d = real_value_truncate (mode, d);
1955 break;
1956 case FLOAT_EXTEND:
1957 /* Don't perform the operation if flag_signaling_nans is on
1958 and the operand is a signaling NaN. */
1959 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1960 return NULL_RTX;
1961 /* All this does is change the mode, unless we are changing
1962 the mode class. */
1963 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1964 real_convert (&d, mode, &d);
1965 break;
1966 case FIX:
1967 /* Don't perform the operation if flag_signaling_nans is on
1968 and the operand is a signaling NaN. */
1969 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1970 return NULL_RTX;
1971 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1972 break;
1973 case NOT:
1975 long tmp[4];
1976 int i;
1978 real_to_target (tmp, &d, GET_MODE (op));
1979 for (i = 0; i < 4; i++)
1980 tmp[i] = ~tmp[i];
1981 real_from_target (&d, tmp, mode);
1982 break;
1984 default:
1985 gcc_unreachable ();
1987 return const_double_from_real_value (d, mode);
1989 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1990 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1991 && is_int_mode (mode, &result_mode))
1993 unsigned int width = GET_MODE_PRECISION (result_mode);
1994 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1995 operators are intentionally left unspecified (to ease implementation
1996 by target backends), for consistency, this routine implements the
1997 same semantics for constant folding as used by the middle-end. */
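/* For example, (fix:SI (const_double:DF 1.0e10)) folds here to
   (const_int 2147483647), saturating at the signed maximum, while a
   NaN operand folds to (const_int 0). */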
1999 /* This was formerly used only for non-IEEE float.
2000 eggert@twinsun.com says it is safe for IEEE also. */
2001 REAL_VALUE_TYPE t;
2002 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2003 wide_int wmax, wmin;
2004 /* This is part of the ABI of real_to_integer, but we check
2005 things before making this call. */
2006 bool fail;
2008 switch (code)
2010 case FIX:
2011 if (REAL_VALUE_ISNAN (*x))
2012 return const0_rtx;
2014 /* Test against the signed upper bound. */
2015 wmax = wi::max_value (width, SIGNED);
2016 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2017 if (real_less (&t, x))
2018 return immed_wide_int_const (wmax, mode);
2020 /* Test against the signed lower bound. */
2021 wmin = wi::min_value (width, SIGNED);
2022 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2023 if (real_less (x, &t))
2024 return immed_wide_int_const (wmin, mode);
2026 return immed_wide_int_const (real_to_integer (x, &fail, width),
2027 mode);
2029 case UNSIGNED_FIX:
2030 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2031 return const0_rtx;
2033 /* Test against the unsigned upper bound. */
2034 wmax = wi::max_value (width, UNSIGNED);
2035 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2036 if (real_less (&t, x))
2037 return immed_wide_int_const (wmax, mode);
2039 return immed_wide_int_const (real_to_integer (x, &fail, width),
2040 mode);
2042 default:
2043 gcc_unreachable ();
2047 return NULL_RTX;
2050 /* Subroutine of simplify_binary_operation to simplify a binary operation
2051 CODE that can commute with byte swapping, with result mode MODE and
2052 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2053 Return zero if no simplification or canonicalization is possible. */
2055 static rtx
2056 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2057 rtx op0, rtx op1)
2059 rtx tem;
2061 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is the byte-swapped C1. */
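/* For example, (and:SI (bswap:SI x) (const_int 0xff00)) becomes
   (bswap:SI (and:SI x (const_int 0xff0000))). */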
2062 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2064 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2065 simplify_gen_unary (BSWAP, mode, op1, mode));
2066 return simplify_gen_unary (BSWAP, mode, tem, mode);
2069 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2070 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2072 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2073 return simplify_gen_unary (BSWAP, mode, tem, mode);
2076 return NULL_RTX;
2079 /* Subroutine of simplify_binary_operation to simplify a commutative,
2080 associative binary operation CODE with result mode MODE, operating
2081 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2082 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2083 canonicalization is possible. */
2085 static rtx
2086 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2087 rtx op0, rtx op1)
2089 rtx tem;
2091 /* Linearize the operator to the left. */
2092 if (GET_CODE (op1) == code)
2094 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2095 if (GET_CODE (op0) == code)
2097 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2098 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2101 /* "a op (b op c)" becomes "(b op c) op a". */
2102 if (! swap_commutative_operands_p (op1, op0))
2103 return simplify_gen_binary (code, mode, op1, op0);
2105 std::swap (op0, op1);
2108 if (GET_CODE (op0) == code)
2110 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2111 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2113 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2114 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2117 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2118 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2119 if (tem != 0)
2120 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2122 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2123 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2124 if (tem != 0)
2125 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2128 return 0;
2132 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2133 and OP1. Return 0 if no simplification is possible.
2135 Don't use this for relational operations such as EQ or LT.
2136 Use simplify_relational_operation instead. */
2138 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2139 rtx op0, rtx op1)
2141 rtx trueop0, trueop1;
2142 rtx tem;
2144 /* Relational operations don't work here. We must know the mode
2145 of the operands in order to do the comparison correctly.
2146 Assuming a full word can give incorrect results.
2147 Consider comparing 128 with -128 in QImode. */
2148 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2149 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2151 /* Make sure the constant is second. */
2152 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2153 && swap_commutative_operands_p (op0, op1))
2154 std::swap (op0, op1);
2156 trueop0 = avoid_constant_pool_reference (op0);
2157 trueop1 = avoid_constant_pool_reference (op1);
2159 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2160 if (tem)
2161 return tem;
2162 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2164 if (tem)
2165 return tem;
2167 /* If the above steps did not result in a simplification and op0 or op1
2168 were constant pool references, use the referenced constants directly. */
2169 if (trueop0 != op0 || trueop1 != op1)
2170 return simplify_gen_binary (code, mode, trueop0, trueop1);
2172 return NULL_RTX;
2175 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2176 which OP0 and OP1 are both vector series or vector duplicates
2177 (which are really just series with a step of 0). If so, try to
2178 form a new series by applying CODE to the bases and to the steps.
2179 Return null if no simplification is possible.
2181 MODE is the mode of the operation and is known to be a vector
2182 integer mode. */
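/* For example, adding (vec_series:V4SI (const_int 1) (const_int 2))
   to (vec_duplicate:V4SI (const_int 3)) simplifies the base to 4 and
   leaves the step at 2, giving the series 4, 6, 8, 10. */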
2184 static rtx
2185 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2186 rtx op0, rtx op1)
2188 rtx base0, step0;
2189 if (vec_duplicate_p (op0, &base0))
2190 step0 = const0_rtx;
2191 else if (!vec_series_p (op0, &base0, &step0))
2192 return NULL_RTX;
2194 rtx base1, step1;
2195 if (vec_duplicate_p (op1, &base1))
2196 step1 = const0_rtx;
2197 else if (!vec_series_p (op1, &base1, &step1))
2198 return NULL_RTX;
2200 /* Only create a new series if we can simplify both parts. In other
2201 cases this isn't really a simplification, and it's not necessarily
2202 a win to replace a vector operation with a scalar operation. */
2203 scalar_mode inner_mode = GET_MODE_INNER (mode);
2204 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2205 if (!new_base)
2206 return NULL_RTX;
2208 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2209 if (!new_step)
2210 return NULL_RTX;
2212 return gen_vec_series (mode, new_base, new_step);
2215 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2216 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2217 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2218 actual constants. */
2220 static rtx
2221 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2222 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2224 rtx tem, reversed, opleft, opright, elt0, elt1;
2225 HOST_WIDE_INT val;
2226 scalar_int_mode int_mode, inner_mode;
2228 /* Even if we can't compute a constant result,
2229 there are some cases worth simplifying. */
2231 switch (code)
2233 case PLUS:
2234 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2235 when x is NaN, infinite, or finite and nonzero. They aren't
2236 when x is -0 and the rounding mode is not towards -infinity,
2237 since (-0) + 0 is then 0. */
2238 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2239 return op0;
2241 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2242 transformations are safe even for IEEE. */
2243 if (GET_CODE (op0) == NEG)
2244 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2245 else if (GET_CODE (op1) == NEG)
2246 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2248 /* (~a) + 1 -> -a */
2249 if (INTEGRAL_MODE_P (mode)
2250 && GET_CODE (op0) == NOT
2251 && trueop1 == const1_rtx)
2252 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2254 /* Handle both-operands-constant cases. We can only add
2255 CONST_INTs to constants since the sum of relocatable symbols
2256 can't be handled by most assemblers. Don't add CONST_INT
2257 to CONST_INT since overflow won't be computed properly if wider
2258 than HOST_BITS_PER_WIDE_INT. */
2260 if ((GET_CODE (op0) == CONST
2261 || GET_CODE (op0) == SYMBOL_REF
2262 || GET_CODE (op0) == LABEL_REF)
2263 && CONST_INT_P (op1))
2264 return plus_constant (mode, op0, INTVAL (op1));
2265 else if ((GET_CODE (op1) == CONST
2266 || GET_CODE (op1) == SYMBOL_REF
2267 || GET_CODE (op1) == LABEL_REF)
2268 && CONST_INT_P (op0))
2269 return plus_constant (mode, op1, INTVAL (op0));
2271 /* See if this is something like X * C - X or vice versa or
2272 if the multiplication is written as a shift. If so, we can
2273 distribute and make a new multiply, shift, or maybe just
2274 have X (if C is 2 in the example above). But don't make
2275 something more expensive than we had before. */
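/* For example, (plus:SI (mult:SI x (const_int 3)) x) can become
   (mult:SI x (const_int 4)), provided the multiply is no more
   expensive than the original expression. */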
2277 if (is_a <scalar_int_mode> (mode, &int_mode))
2279 rtx lhs = op0, rhs = op1;
2281 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2282 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2284 if (GET_CODE (lhs) == NEG)
2286 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2287 lhs = XEXP (lhs, 0);
2289 else if (GET_CODE (lhs) == MULT
2290 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2292 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2293 lhs = XEXP (lhs, 0);
2295 else if (GET_CODE (lhs) == ASHIFT
2296 && CONST_INT_P (XEXP (lhs, 1))
2297 && INTVAL (XEXP (lhs, 1)) >= 0
2298 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2300 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2301 GET_MODE_PRECISION (int_mode));
2302 lhs = XEXP (lhs, 0);
2305 if (GET_CODE (rhs) == NEG)
2307 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2308 rhs = XEXP (rhs, 0);
2310 else if (GET_CODE (rhs) == MULT
2311 && CONST_INT_P (XEXP (rhs, 1)))
2313 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2314 rhs = XEXP (rhs, 0);
2316 else if (GET_CODE (rhs) == ASHIFT
2317 && CONST_INT_P (XEXP (rhs, 1))
2318 && INTVAL (XEXP (rhs, 1)) >= 0
2319 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2321 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2322 GET_MODE_PRECISION (int_mode));
2323 rhs = XEXP (rhs, 0);
2326 if (rtx_equal_p (lhs, rhs))
2328 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2329 rtx coeff;
2330 bool speed = optimize_function_for_speed_p (cfun);
2332 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2334 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2335 return (set_src_cost (tem, int_mode, speed)
2336 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2340 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2341 if (CONST_SCALAR_INT_P (op1)
2342 && GET_CODE (op0) == XOR
2343 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2344 && mode_signbit_p (mode, op1))
2345 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2346 simplify_gen_binary (XOR, mode, op1,
2347 XEXP (op0, 1)));
2349 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2350 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2351 && GET_CODE (op0) == MULT
2352 && GET_CODE (XEXP (op0, 0)) == NEG)
2354 rtx in1, in2;
2356 in1 = XEXP (XEXP (op0, 0), 0);
2357 in2 = XEXP (op0, 1);
2358 return simplify_gen_binary (MINUS, mode, op1,
2359 simplify_gen_binary (MULT, mode,
2360 in1, in2));
2363 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2364 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2365 is 1. */
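/* For example, with STORE_FLAG_VALUE == 1,
   (plus:SI (eq:SI a b) (const_int -1)) becomes (neg:SI (ne:SI a b)). */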
2366 if (COMPARISON_P (op0)
2367 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2368 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2369 && (reversed = reversed_comparison (op0, mode)))
2370 return
2371 simplify_gen_unary (NEG, mode, reversed, mode);
2373 /* If one of the operands is a PLUS or a MINUS, see if we can
2374 simplify this by the associative law.
2375 Don't use the associative law for floating point.
2376 The inaccuracy makes it nonassociative,
2377 and subtle programs can break if operations are associated. */
2379 if (INTEGRAL_MODE_P (mode)
2380 && (plus_minus_operand_p (op0)
2381 || plus_minus_operand_p (op1))
2382 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2383 return tem;
2385 /* Reassociate floating point addition only when the user
2386 specifies associative math operations. */
2387 if (FLOAT_MODE_P (mode)
2388 && flag_associative_math)
2390 tem = simplify_associative_operation (code, mode, op0, op1);
2391 if (tem)
2392 return tem;
2395 /* Handle vector series. */
2396 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2398 tem = simplify_binary_operation_series (code, mode, op0, op1);
2399 if (tem)
2400 return tem;
2402 break;
2404 case COMPARE:
2405 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2406 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2407 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2408 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2410 rtx xop00 = XEXP (op0, 0);
2411 rtx xop10 = XEXP (op1, 0);
2413 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2414 return xop00;
2416 if (REG_P (xop00) && REG_P (xop10)
2417 && REGNO (xop00) == REGNO (xop10)
2418 && GET_MODE (xop00) == mode
2419 && GET_MODE (xop10) == mode
2420 && GET_MODE_CLASS (mode) == MODE_CC)
2421 return xop00;
2423 break;
2425 case MINUS:
2426 /* We can't assume x-x is 0 even with non-IEEE floating point,
2427 but since it is zero except in very strange circumstances, we
2428 will treat it as zero with -ffinite-math-only. */
2429 if (rtx_equal_p (trueop0, trueop1)
2430 && ! side_effects_p (op0)
2431 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2432 return CONST0_RTX (mode);
2434 /* Change subtraction from zero into negation. (0 - x) is the
2435 same as -x when x is NaN, infinite, or finite and nonzero.
2436 But if the mode has signed zeros, and does not round towards
2437 -infinity, then 0 - 0 is 0, not -0. */
2438 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2439 return simplify_gen_unary (NEG, mode, op1, mode);
2441 /* (-1 - a) is ~a, unless the expression contains symbolic
2442 constants, in which case not retaining additions and
2443 subtractions could cause invalid assembly to be produced. */
2444 if (trueop0 == constm1_rtx
2445 && !contains_symbolic_reference_p (op1))
2446 return simplify_gen_unary (NOT, mode, op1, mode);
2448 /* Subtracting 0 has no effect unless the mode has signed zeros
2449 and supports rounding towards -infinity. In such a case,
2450 0 - 0 is -0. */
2451 if (!(HONOR_SIGNED_ZEROS (mode)
2452 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2453 && trueop1 == CONST0_RTX (mode))
2454 return op0;
2456 /* See if this is something like X * C - X or vice versa or
2457 if the multiplication is written as a shift. If so, we can
2458 distribute and make a new multiply, shift, or maybe just
2459 have X (if C is 2 in the example above). But don't make
2460 something more expensive than we had before. */
2462 if (is_a <scalar_int_mode> (mode, &int_mode))
2464 rtx lhs = op0, rhs = op1;
2466 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2467 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2469 if (GET_CODE (lhs) == NEG)
2471 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2472 lhs = XEXP (lhs, 0);
2474 else if (GET_CODE (lhs) == MULT
2475 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2477 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2478 lhs = XEXP (lhs, 0);
2480 else if (GET_CODE (lhs) == ASHIFT
2481 && CONST_INT_P (XEXP (lhs, 1))
2482 && INTVAL (XEXP (lhs, 1)) >= 0
2483 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2485 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2486 GET_MODE_PRECISION (int_mode));
2487 lhs = XEXP (lhs, 0);
2490 if (GET_CODE (rhs) == NEG)
2492 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2493 rhs = XEXP (rhs, 0);
2495 else if (GET_CODE (rhs) == MULT
2496 && CONST_INT_P (XEXP (rhs, 1)))
2498 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2499 rhs = XEXP (rhs, 0);
2501 else if (GET_CODE (rhs) == ASHIFT
2502 && CONST_INT_P (XEXP (rhs, 1))
2503 && INTVAL (XEXP (rhs, 1)) >= 0
2504 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2506 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2507 GET_MODE_PRECISION (int_mode));
2508 negcoeff1 = -negcoeff1;
2509 rhs = XEXP (rhs, 0);
2512 if (rtx_equal_p (lhs, rhs))
2514 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2515 rtx coeff;
2516 bool speed = optimize_function_for_speed_p (cfun);
2518 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2520 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2521 return (set_src_cost (tem, int_mode, speed)
2522 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2526 /* (a - (-b)) -> (a + b). True even for IEEE. */
2527 if (GET_CODE (op1) == NEG)
2528 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2530 /* (-x - c) may be simplified as (-c - x). */
2531 if (GET_CODE (op0) == NEG
2532 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2534 tem = simplify_unary_operation (NEG, mode, op1, mode);
2535 if (tem)
2536 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2539 /* Don't let a relocatable value get a negative coeff. */
2540 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2541 return simplify_gen_binary (PLUS, mode,
2542 op0,
2543 neg_const_int (mode, op1));
2545 /* (x - (x & y)) -> (x & ~y) */
2546 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2548 if (rtx_equal_p (op0, XEXP (op1, 0)))
2550 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2551 GET_MODE (XEXP (op1, 1)));
2552 return simplify_gen_binary (AND, mode, op0, tem);
2554 if (rtx_equal_p (op0, XEXP (op1, 1)))
2556 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2557 GET_MODE (XEXP (op1, 0)));
2558 return simplify_gen_binary (AND, mode, op0, tem);
2562 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2563 by reversing the comparison code if valid. */
2564 if (STORE_FLAG_VALUE == 1
2565 && trueop0 == const1_rtx
2566 && COMPARISON_P (op1)
2567 && (reversed = reversed_comparison (op1, mode)))
2568 return reversed;
2570 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2571 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2572 && GET_CODE (op1) == MULT
2573 && GET_CODE (XEXP (op1, 0)) == NEG)
2575 rtx in1, in2;
2577 in1 = XEXP (XEXP (op1, 0), 0);
2578 in2 = XEXP (op1, 1);
2579 return simplify_gen_binary (PLUS, mode,
2580 simplify_gen_binary (MULT, mode,
2581 in1, in2),
2582 op0);
2585 /* Canonicalize (minus (neg A) (mult B C)) to
2586 (minus (mult (neg B) C) A). */
2587 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2588 && GET_CODE (op1) == MULT
2589 && GET_CODE (op0) == NEG)
2591 rtx in1, in2;
2593 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2594 in2 = XEXP (op1, 1);
2595 return simplify_gen_binary (MINUS, mode,
2596 simplify_gen_binary (MULT, mode,
2597 in1, in2),
2598 XEXP (op0, 0));
2601 /* If one of the operands is a PLUS or a MINUS, see if we can
2602 simplify this by the associative law. This will, for example,
2603 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2604 Don't use the associative law for floating point.
2605 The inaccuracy makes it nonassociative,
2606 and subtle programs can break if operations are associated. */
2608 if (INTEGRAL_MODE_P (mode)
2609 && (plus_minus_operand_p (op0)
2610 || plus_minus_operand_p (op1))
2611 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2612 return tem;
2614 /* Handle vector series. */
2615 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2617 tem = simplify_binary_operation_series (code, mode, op0, op1);
2618 if (tem)
2619 return tem;
2621 break;
2623 case MULT:
2624 if (trueop1 == constm1_rtx)
2625 return simplify_gen_unary (NEG, mode, op0, mode);
2627 if (GET_CODE (op0) == NEG)
2629 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2630 /* If op1 is a MULT as well and simplify_unary_operation
2631 just moved the NEG to the second operand, simplify_gen_binary
2632 below could, via simplify_associative_operation, move
2633 the NEG around again and recurse endlessly. */
2634 if (temp
2635 && GET_CODE (op1) == MULT
2636 && GET_CODE (temp) == MULT
2637 && XEXP (op1, 0) == XEXP (temp, 0)
2638 && GET_CODE (XEXP (temp, 1)) == NEG
2639 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2640 temp = NULL_RTX;
2641 if (temp)
2642 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2644 if (GET_CODE (op1) == NEG)
2646 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2647 /* If op0 is a MULT as well and simplify_unary_operation
2648 just moved the NEG to the second operand, simplify_gen_binary
2649 below could, via simplify_associative_operation, move
2650 the NEG around again and recurse endlessly. */
2651 if (temp
2652 && GET_CODE (op0) == MULT
2653 && GET_CODE (temp) == MULT
2654 && XEXP (op0, 0) == XEXP (temp, 0)
2655 && GET_CODE (XEXP (temp, 1)) == NEG
2656 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2657 temp = NULL_RTX;
2658 if (temp)
2659 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2662 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2663 x is NaN, since x * 0 is then also NaN. Nor is it valid
2664 when the mode has signed zeros, since multiplying a negative
2665 number by 0 will give -0, not 0. */
2666 if (!HONOR_NANS (mode)
2667 && !HONOR_SIGNED_ZEROS (mode)
2668 && trueop1 == CONST0_RTX (mode)
2669 && ! side_effects_p (op0))
2670 return op1;
2672 /* In IEEE floating point, x*1 is not equivalent to x for
2673 signalling NaNs. */
2674 if (!HONOR_SNANS (mode)
2675 && trueop1 == CONST1_RTX (mode))
2676 return op0;
2678 /* Convert multiply by constant power of two into shift. */
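/* For example, (mult:SI x (const_int 8)) becomes
   (ashift:SI x (const_int 3)). */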
2679 if (CONST_SCALAR_INT_P (trueop1))
2681 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2682 if (val >= 0)
2683 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2686 /* x*2 is x+x and x*(-1) is -x */
2687 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2688 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2689 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2690 && GET_MODE (op0) == mode)
2692 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2694 if (real_equal (d1, &dconst2))
2695 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2697 if (!HONOR_SNANS (mode)
2698 && real_equal (d1, &dconstm1))
2699 return simplify_gen_unary (NEG, mode, op0, mode);
2702 /* Optimize -x * -x as x * x. */
2703 if (FLOAT_MODE_P (mode)
2704 && GET_CODE (op0) == NEG
2705 && GET_CODE (op1) == NEG
2706 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2707 && !side_effects_p (XEXP (op0, 0)))
2708 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2710 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2711 if (SCALAR_FLOAT_MODE_P (mode)
2712 && GET_CODE (op0) == ABS
2713 && GET_CODE (op1) == ABS
2714 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2715 && !side_effects_p (XEXP (op0, 0)))
2716 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2718 /* Reassociate multiplication, but for floating point MULTs
2719 only when the user specifies unsafe math optimizations. */
2720 if (! FLOAT_MODE_P (mode)
2721 || flag_unsafe_math_optimizations)
2723 tem = simplify_associative_operation (code, mode, op0, op1);
2724 if (tem)
2725 return tem;
2727 break;
2729 case IOR:
2730 if (trueop1 == CONST0_RTX (mode))
2731 return op0;
2732 if (INTEGRAL_MODE_P (mode)
2733 && trueop1 == CONSTM1_RTX (mode)
2734 && !side_effects_p (op0))
2735 return op1;
2736 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2737 return op0;
2738 /* A | (~A) -> -1 */
2739 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2740 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2741 && ! side_effects_p (op0)
2742 && SCALAR_INT_MODE_P (mode))
2743 return constm1_rtx;
2745 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2746 if (CONST_INT_P (op1)
2747 && HWI_COMPUTABLE_MODE_P (mode)
2748 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2749 && !side_effects_p (op0))
2750 return op1;
2752 /* Canonicalize (X & C1) | C2. */
2753 if (GET_CODE (op0) == AND
2754 && CONST_INT_P (trueop1)
2755 && CONST_INT_P (XEXP (op0, 1)))
2757 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2758 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2759 HOST_WIDE_INT c2 = INTVAL (trueop1);
2761 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2762 if ((c1 & c2) == c1
2763 && !side_effects_p (XEXP (op0, 0)))
2764 return trueop1;
2766 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2767 if (((c1|c2) & mask) == mask)
2768 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2771 /* Convert (A & B) | A to A. */
2772 if (GET_CODE (op0) == AND
2773 && (rtx_equal_p (XEXP (op0, 0), op1)
2774 || rtx_equal_p (XEXP (op0, 1), op1))
2775 && ! side_effects_p (XEXP (op0, 0))
2776 && ! side_effects_p (XEXP (op0, 1)))
2777 return op1;
2779 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2780 mode size to (rotate A CX). */
2782 if (GET_CODE (op1) == ASHIFT
2783 || GET_CODE (op1) == SUBREG)
2785 opleft = op1;
2786 opright = op0;
2788 else
2790 opright = op1;
2791 opleft = op0;
2794 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2795 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2796 && CONST_INT_P (XEXP (opleft, 1))
2797 && CONST_INT_P (XEXP (opright, 1))
2798 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2799 == GET_MODE_UNIT_PRECISION (mode)))
2800 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2802 /* Same, but for ashift that has been "simplified" to a wider mode
2803 by simplify_shift_const. */
2805 if (GET_CODE (opleft) == SUBREG
2806 && is_a <scalar_int_mode> (mode, &int_mode)
2807 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2808 &inner_mode)
2809 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2810 && GET_CODE (opright) == LSHIFTRT
2811 && GET_CODE (XEXP (opright, 0)) == SUBREG
2812 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2813 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2814 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2815 SUBREG_REG (XEXP (opright, 0)))
2816 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2817 && CONST_INT_P (XEXP (opright, 1))
2818 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2819 + INTVAL (XEXP (opright, 1))
2820 == GET_MODE_PRECISION (int_mode)))
2821 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2822 XEXP (SUBREG_REG (opleft), 1));
2824 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2825 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2826 the PLUS does not affect any of the bits in OP1: then we can do
2827 the IOR as a PLUS and we can associate. This is valid if OP1
2828 can be safely shifted left C bits. */
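/* For example, if the nonzero bits of the PLUS are confined to the
   low 16 bits, (ior:SI (ashiftrt:SI (plus:SI x (const_int 1))
   (const_int 16)) (const_int 0x7f00)) can become
   (ashiftrt:SI (plus:SI x (const_int 0x7f000001)) (const_int 16)). */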
2829 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2830 && GET_CODE (XEXP (op0, 0)) == PLUS
2831 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2832 && CONST_INT_P (XEXP (op0, 1))
2833 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2835 int count = INTVAL (XEXP (op0, 1));
2836 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2838 if (mask >> count == INTVAL (trueop1)
2839 && trunc_int_for_mode (mask, mode) == mask
2840 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2841 return simplify_gen_binary (ASHIFTRT, mode,
2842 plus_constant (mode, XEXP (op0, 0),
2843 mask),
2844 XEXP (op0, 1));
2847 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2848 if (tem)
2849 return tem;
2851 tem = simplify_associative_operation (code, mode, op0, op1);
2852 if (tem)
2853 return tem;
2854 break;
2856 case XOR:
2857 if (trueop1 == CONST0_RTX (mode))
2858 return op0;
2859 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2860 return simplify_gen_unary (NOT, mode, op0, mode);
2861 if (rtx_equal_p (trueop0, trueop1)
2862 && ! side_effects_p (op0)
2863 && GET_MODE_CLASS (mode) != MODE_CC)
2864 return CONST0_RTX (mode);
2866 /* Canonicalize XOR of the most significant bit to PLUS. */
2867 if (CONST_SCALAR_INT_P (op1)
2868 && mode_signbit_p (mode, op1))
2869 return simplify_gen_binary (PLUS, mode, op0, op1);
2870 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2871 if (CONST_SCALAR_INT_P (op1)
2872 && GET_CODE (op0) == PLUS
2873 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2874 && mode_signbit_p (mode, XEXP (op0, 1)))
2875 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2876 simplify_gen_binary (XOR, mode, op1,
2877 XEXP (op0, 1)));
2879 /* If we are XORing two things that have no bits in common,
2880 convert them into an IOR. This helps to detect rotation encoded
2881 using those methods and possibly other simplifications. */
2883 if (HWI_COMPUTABLE_MODE_P (mode)
2884 && (nonzero_bits (op0, mode)
2885 & nonzero_bits (op1, mode)) == 0)
2886 return (simplify_gen_binary (IOR, mode, op0, op1));
2888 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2889 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2890 (NOT y). */
2892 int num_negated = 0;
2894 if (GET_CODE (op0) == NOT)
2895 num_negated++, op0 = XEXP (op0, 0);
2896 if (GET_CODE (op1) == NOT)
2897 num_negated++, op1 = XEXP (op1, 0);
2899 if (num_negated == 2)
2900 return simplify_gen_binary (XOR, mode, op0, op1);
2901 else if (num_negated == 1)
2902 return simplify_gen_unary (NOT, mode,
2903 simplify_gen_binary (XOR, mode, op0, op1),
2904 mode);
2907 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2908 correspond to a machine insn or result in further simplifications
2909 if B is a constant. */
2911 if (GET_CODE (op0) == AND
2912 && rtx_equal_p (XEXP (op0, 1), op1)
2913 && ! side_effects_p (op1))
2914 return simplify_gen_binary (AND, mode,
2915 simplify_gen_unary (NOT, mode,
2916 XEXP (op0, 0), mode),
2917 op1);
2919 else if (GET_CODE (op0) == AND
2920 && rtx_equal_p (XEXP (op0, 0), op1)
2921 && ! side_effects_p (op1))
2922 return simplify_gen_binary (AND, mode,
2923 simplify_gen_unary (NOT, mode,
2924 XEXP (op0, 1), mode),
2925 op1);
2927 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2928 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2929 out bits inverted twice and not set by C. Similarly, given
2930 (xor (and (xor A B) C) D), simplify without inverting C in
2931 the xor operand: (xor (and A C) (B&C)^D). */
2933 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2934 && GET_CODE (XEXP (op0, 0)) == XOR
2935 && CONST_INT_P (op1)
2936 && CONST_INT_P (XEXP (op0, 1))
2937 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2939 enum rtx_code op = GET_CODE (op0);
2940 rtx a = XEXP (XEXP (op0, 0), 0);
2941 rtx b = XEXP (XEXP (op0, 0), 1);
2942 rtx c = XEXP (op0, 1);
2943 rtx d = op1;
2944 HOST_WIDE_INT bval = INTVAL (b);
2945 HOST_WIDE_INT cval = INTVAL (c);
2946 HOST_WIDE_INT dval = INTVAL (d);
2947 HOST_WIDE_INT xcval;
2949 if (op == IOR)
2950 xcval = ~cval;
2951 else
2952 xcval = cval;
2954 return simplify_gen_binary (XOR, mode,
2955 simplify_gen_binary (op, mode, a, c),
2956 gen_int_mode ((bval & xcval) ^ dval,
2957 mode));
2960 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2961 we can transform like this:
2962 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2963 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2964 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2965 Attempt a few simplifications when B and C are both constants. */
2966 if (GET_CODE (op0) == AND
2967 && CONST_INT_P (op1)
2968 && CONST_INT_P (XEXP (op0, 1)))
2970 rtx a = XEXP (op0, 0);
2971 rtx b = XEXP (op0, 1);
2972 rtx c = op1;
2973 HOST_WIDE_INT bval = INTVAL (b);
2974 HOST_WIDE_INT cval = INTVAL (c);
2976 /* Instead of computing ~A&C, we compute its negated value,
2977 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2978 optimize for sure. If it does not simplify, we still try
2979 to compute ~A&C below, but since that always allocates
2980 RTL, we don't try that before committing to returning a
2981 simplified expression. */
2982 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2983 GEN_INT (~cval));
2985 if ((~cval & bval) == 0)
2987 rtx na_c = NULL_RTX;
2988 if (n_na_c)
2989 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2990 else
2992 /* If ~A does not simplify, don't bother: we don't
2993 want to simplify 2 operations into 3, and if na_c
2994 were to simplify with na, n_na_c would have
2995 simplified as well. */
2996 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2997 if (na)
2998 na_c = simplify_gen_binary (AND, mode, na, c);
3001 /* Try to simplify ~A&C | ~B&C. */
3002 if (na_c != NULL_RTX)
3003 return simplify_gen_binary (IOR, mode, na_c,
3004 gen_int_mode (~bval & cval, mode));
3006 else
3008 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3009 if (n_na_c == CONSTM1_RTX (mode))
3011 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3012 gen_int_mode (~cval & bval,
3013 mode));
3014 return simplify_gen_binary (IOR, mode, a_nc_b,
3015 gen_int_mode (~bval & cval,
3016 mode));
3021 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3022 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3023 machines, and also has shorter instruction path length. */
3024 if (GET_CODE (op0) == AND
3025 && GET_CODE (XEXP (op0, 0)) == XOR
3026 && CONST_INT_P (XEXP (op0, 1))
3027 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3029 rtx a = trueop1;
3030 rtx b = XEXP (XEXP (op0, 0), 1);
3031 rtx c = XEXP (op0, 1);
3032 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3033 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3034 rtx bc = simplify_gen_binary (AND, mode, b, c);
3035 return simplify_gen_binary (IOR, mode, a_nc, bc);
3037 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3038 else if (GET_CODE (op0) == AND
3039 && GET_CODE (XEXP (op0, 0)) == XOR
3040 && CONST_INT_P (XEXP (op0, 1))
3041 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3043 rtx a = XEXP (XEXP (op0, 0), 0);
3044 rtx b = trueop1;
3045 rtx c = XEXP (op0, 1);
3046 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3047 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3048 rtx ac = simplify_gen_binary (AND, mode, a, c);
3049 return simplify_gen_binary (IOR, mode, ac, b_nc);
3052 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3053 comparison if STORE_FLAG_VALUE is 1. */
3054 if (STORE_FLAG_VALUE == 1
3055 && trueop1 == const1_rtx
3056 && COMPARISON_P (op0)
3057 && (reversed = reversed_comparison (op0, mode)))
3058 return reversed;
3060 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3061 is (lt foo (const_int 0)), so we can perform the above
3062 simplification if STORE_FLAG_VALUE is 1. */
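/* For example, in SImode with STORE_FLAG_VALUE == 1,
   (xor:SI (lshiftrt:SI x (const_int 31)) (const_int 1)) becomes
   (ge:SI x (const_int 0)). */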
3064 if (is_a <scalar_int_mode> (mode, &int_mode)
3065 && STORE_FLAG_VALUE == 1
3066 && trueop1 == const1_rtx
3067 && GET_CODE (op0) == LSHIFTRT
3068 && CONST_INT_P (XEXP (op0, 1))
3069 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3070 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3072 /* (xor (comparison foo bar) (const_int sign-bit))
3073 when STORE_FLAG_VALUE is the sign bit. */
3074 if (is_a <scalar_int_mode> (mode, &int_mode)
3075 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3076 && trueop1 == const_true_rtx
3077 && COMPARISON_P (op0)
3078 && (reversed = reversed_comparison (op0, int_mode)))
3079 return reversed;
3081 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3082 if (tem)
3083 return tem;
3085 tem = simplify_associative_operation (code, mode, op0, op1);
3086 if (tem)
3087 return tem;
3088 break;
3090 case AND:
3091 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3092 return trueop1;
3093 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3094 return op0;
3095 if (HWI_COMPUTABLE_MODE_P (mode))
3097 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3098 HOST_WIDE_INT nzop1;
3099 if (CONST_INT_P (trueop1))
3101 HOST_WIDE_INT val1 = INTVAL (trueop1);
3102 /* If we are turning off bits already known off in OP0, we need
3103 not do an AND. */
3104 if ((nzop0 & ~val1) == 0)
3105 return op0;
3107 nzop1 = nonzero_bits (trueop1, mode);
3108 /* If we are clearing all the nonzero bits, the result is zero. */
3109 if ((nzop1 & nzop0) == 0
3110 && !side_effects_p (op0) && !side_effects_p (op1))
3111 return CONST0_RTX (mode);
3113 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3114 && GET_MODE_CLASS (mode) != MODE_CC)
3115 return op0;
3116 /* A & (~A) -> 0 */
3117 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3118 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3119 && ! side_effects_p (op0)
3120 && GET_MODE_CLASS (mode) != MODE_CC)
3121 return CONST0_RTX (mode);
3123 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3124 there are no nonzero bits of C outside of X's mode. */
3125 if ((GET_CODE (op0) == SIGN_EXTEND
3126 || GET_CODE (op0) == ZERO_EXTEND)
3127 && CONST_INT_P (trueop1)
3128 && HWI_COMPUTABLE_MODE_P (mode)
3129 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3130 & UINTVAL (trueop1)) == 0)
3132 machine_mode imode = GET_MODE (XEXP (op0, 0));
3133 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3134 gen_int_mode (INTVAL (trueop1),
3135 imode));
3136 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3139 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3140 we might be able to further simplify the AND with X and potentially
3141 remove the truncation altogether. */
3142 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3144 rtx x = XEXP (op0, 0);
3145 machine_mode xmode = GET_MODE (x);
3146 tem = simplify_gen_binary (AND, xmode, x,
3147 gen_int_mode (INTVAL (trueop1), xmode));
3148 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3151 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3152 if (GET_CODE (op0) == IOR
3153 && CONST_INT_P (trueop1)
3154 && CONST_INT_P (XEXP (op0, 1)))
3156 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3157 return simplify_gen_binary (IOR, mode,
3158 simplify_gen_binary (AND, mode,
3159 XEXP (op0, 0), op1),
3160 gen_int_mode (tmp, mode));
3163 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3164 insn (and may simplify more). */
3165 if (GET_CODE (op0) == XOR
3166 && rtx_equal_p (XEXP (op0, 0), op1)
3167 && ! side_effects_p (op1))
3168 return simplify_gen_binary (AND, mode,
3169 simplify_gen_unary (NOT, mode,
3170 XEXP (op0, 1), mode),
3171 op1);
3173 if (GET_CODE (op0) == XOR
3174 && rtx_equal_p (XEXP (op0, 1), op1)
3175 && ! side_effects_p (op1))
3176 return simplify_gen_binary (AND, mode,
3177 simplify_gen_unary (NOT, mode,
3178 XEXP (op0, 0), mode),
3179 op1);
3181 /* Similarly for (~(A ^ B)) & A. */
3182 if (GET_CODE (op0) == NOT
3183 && GET_CODE (XEXP (op0, 0)) == XOR
3184 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3185 && ! side_effects_p (op1))
3186 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3188 if (GET_CODE (op0) == NOT
3189 && GET_CODE (XEXP (op0, 0)) == XOR
3190 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3191 && ! side_effects_p (op1))
3192 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3194 /* Convert (A | B) & A to A. */
3195 if (GET_CODE (op0) == IOR
3196 && (rtx_equal_p (XEXP (op0, 0), op1)
3197 || rtx_equal_p (XEXP (op0, 1), op1))
3198 && ! side_effects_p (XEXP (op0, 0))
3199 && ! side_effects_p (XEXP (op0, 1)))
3200 return op1;
3202 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3203 ((A & N) + B) & M -> (A + B) & M
3204 Similarly if (N & M) == 0,
3205 ((A | N) + B) & M -> (A + B) & M
3206 and for - instead of + and/or ^ instead of |.
3207 Also, if (N & M) == 0, then
3208 (A +- N) & M -> A & M. */
3209 if (CONST_INT_P (trueop1)
3210 && HWI_COMPUTABLE_MODE_P (mode)
3211 && ~UINTVAL (trueop1)
3212 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3213 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3215 rtx pmop[2];
3216 int which;
3218 pmop[0] = XEXP (op0, 0);
3219 pmop[1] = XEXP (op0, 1);
3221 if (CONST_INT_P (pmop[1])
3222 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3223 return simplify_gen_binary (AND, mode, pmop[0], op1);
3225 for (which = 0; which < 2; which++)
3227 tem = pmop[which];
3228 switch (GET_CODE (tem))
3230 case AND:
3231 if (CONST_INT_P (XEXP (tem, 1))
3232 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3233 == UINTVAL (trueop1))
3234 pmop[which] = XEXP (tem, 0);
3235 break;
3236 case IOR:
3237 case XOR:
3238 if (CONST_INT_P (XEXP (tem, 1))
3239 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3240 pmop[which] = XEXP (tem, 0);
3241 break;
3242 default:
3243 break;
3247 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3249 tem = simplify_gen_binary (GET_CODE (op0), mode,
3250 pmop[0], pmop[1]);
3251 return simplify_gen_binary (code, mode, tem, op1);
3255 /* (and X (ior (not X) Y)) -> (and X Y) */
3256 if (GET_CODE (op1) == IOR
3257 && GET_CODE (XEXP (op1, 0)) == NOT
3258 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3259 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3261 /* (and (ior (not X) Y) X) -> (and X Y) */
3262 if (GET_CODE (op0) == IOR
3263 && GET_CODE (XEXP (op0, 0)) == NOT
3264 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3265 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3267 /* (and X (ior Y (not X))) -> (and X Y) */
3268 if (GET_CODE (op1) == IOR
3269 && GET_CODE (XEXP (op1, 1)) == NOT
3270 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3271 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3273 /* (and (ior Y (not X)) X) -> (and X Y) */
3274 if (GET_CODE (op0) == IOR
3275 && GET_CODE (XEXP (op0, 1)) == NOT
3276 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3277 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3279 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3280 if (tem)
3281 return tem;
3283 tem = simplify_associative_operation (code, mode, op0, op1);
3284 if (tem)
3285 return tem;
3286 break;
3288 case UDIV:
3289 /* 0/x is 0 (or x&0 if x has side-effects). */
3290 if (trueop0 == CONST0_RTX (mode)
3291 && !cfun->can_throw_non_call_exceptions)
3293 if (side_effects_p (op1))
3294 return simplify_gen_binary (AND, mode, op1, trueop0);
3295 return trueop0;
3297 /* x/1 is x. */
3298 if (trueop1 == CONST1_RTX (mode))
3300 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3301 if (tem)
3302 return tem;
3304 /* Convert divide by power of two into shift. */
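/* For example, (udiv:SI x (const_int 16)) becomes
   (lshiftrt:SI x (const_int 4)). */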
3305 if (CONST_INT_P (trueop1)
3306 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3307 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3308 break;
3310 case DIV:
3311 /* Handle floating point and integers separately. */
3312 if (SCALAR_FLOAT_MODE_P (mode))
3314 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3315 safe for modes with NaNs, since 0.0 / 0.0 will then be
3316 NaN rather than 0.0. Nor is it safe for modes with signed
3317 zeros, since dividing 0 by a negative number gives -0.0. */
3318 if (trueop0 == CONST0_RTX (mode)
3319 && !HONOR_NANS (mode)
3320 && !HONOR_SIGNED_ZEROS (mode)
3321 && ! side_effects_p (op1))
3322 return op0;
3323 /* x/1.0 is x. */
3324 if (trueop1 == CONST1_RTX (mode)
3325 && !HONOR_SNANS (mode))
3326 return op0;
3328 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3329 && trueop1 != CONST0_RTX (mode))
3331 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3333 /* x/-1.0 is -x. */
3334 if (real_equal (d1, &dconstm1)
3335 && !HONOR_SNANS (mode))
3336 return simplify_gen_unary (NEG, mode, op0, mode);
3338 /* Change FP division by a constant into multiplication.
3339 Only do this with -freciprocal-math. */
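/* For example, with -freciprocal-math, (div:DF x (const_double 4.0))
   becomes (mult:DF x (const_double 0.25)). */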
3340 if (flag_reciprocal_math
3341 && !real_equal (d1, &dconst0))
3343 REAL_VALUE_TYPE d;
3344 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3345 tem = const_double_from_real_value (d, mode);
3346 return simplify_gen_binary (MULT, mode, op0, tem);
3350 else if (SCALAR_INT_MODE_P (mode))
3352 /* 0/x is 0 (or x&0 if x has side-effects). */
3353 if (trueop0 == CONST0_RTX (mode)
3354 && !cfun->can_throw_non_call_exceptions)
3356 if (side_effects_p (op1))
3357 return simplify_gen_binary (AND, mode, op1, trueop0);
3358 return trueop0;
3360 /* x/1 is x. */
3361 if (trueop1 == CONST1_RTX (mode))
3363 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3364 if (tem)
3365 return tem;
3367 /* x/-1 is -x. */
3368 if (trueop1 == constm1_rtx)
3370 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3371 if (x)
3372 return simplify_gen_unary (NEG, mode, x, mode);
3375 break;
3377 case UMOD:
3378 /* 0%x is 0 (or x&0 if x has side-effects). */
3379 if (trueop0 == CONST0_RTX (mode))
3381 if (side_effects_p (op1))
3382 return simplify_gen_binary (AND, mode, op1, trueop0);
3383 return trueop0;
3385 /* x%1 is 0 (or x&0 if x has side-effects). */
3386 if (trueop1 == CONST1_RTX (mode))
3388 if (side_effects_p (op0))
3389 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3390 return CONST0_RTX (mode);
3392 /* Implement modulus by power of two as AND. */
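/* For example, (umod:SI x (const_int 8)) becomes
   (and:SI x (const_int 7)). */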
3393 if (CONST_INT_P (trueop1)
3394 && exact_log2 (UINTVAL (trueop1)) > 0)
3395 return simplify_gen_binary (AND, mode, op0,
3396 gen_int_mode (INTVAL (op1) - 1, mode));
3397 break;
3399 case MOD:
3400 /* 0%x is 0 (or x&0 if x has side-effects). */
3401 if (trueop0 == CONST0_RTX (mode))
3403 if (side_effects_p (op1))
3404 return simplify_gen_binary (AND, mode, op1, trueop0);
3405 return trueop0;
3407 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3408 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3410 if (side_effects_p (op0))
3411 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3412 return CONST0_RTX (mode);
3414 break;
3416 case ROTATERT:
3417 case ROTATE:
3418 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3419 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3420 bitsize - 1, use the other direction of rotate with an amount of
3421 1 .. bitsize / 2 - 1 instead. */
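/* For example, on a target with both rotate patterns,
   (rotate:SI x (const_int 25)) becomes (rotatert:SI x (const_int 7)). */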
3422 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3423 if (CONST_INT_P (trueop1)
3424 && IN_RANGE (INTVAL (trueop1),
3425 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3426 GET_MODE_UNIT_PRECISION (mode) - 1))
3427 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3428 mode, op0,
3429 GEN_INT (GET_MODE_UNIT_PRECISION (mode)
3430 - INTVAL (trueop1)));
3431 #endif
3432 /* FALLTHRU */
3433 case ASHIFTRT:
3434 if (trueop1 == CONST0_RTX (mode))
3435 return op0;
3436 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3437 return op0;
3438 /* Rotating ~0 always results in ~0. */
3439 if (CONST_INT_P (trueop0)
3440 && HWI_COMPUTABLE_MODE_P (mode)
3441 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3442 && ! side_effects_p (op1))
3443 return op0;
3445 canonicalize_shift:
3446 /* Given:
3447 scalar modes M1, M2
3448 scalar constants c1, c2
3449 size (M2) > size (M1)
3450 c1 == size (M2) - size (M1)
3451 optimize:
3452 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3453 <low_part>)
3454 (const_int <c2>))
3456 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3457 <low_part>). */
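/* For example, with M2 = DImode, M1 = SImode and c1 = 32:
   (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32))
   <low_part>) (const_int 5))
   becomes (subreg:SI (ashiftrt:DI (reg:DI R) (const_int 37)) <low_part>). */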
3458 if ((code == ASHIFTRT || code == LSHIFTRT)
3459 && is_a <scalar_int_mode> (mode, &int_mode)
3460 && SUBREG_P (op0)
3461 && CONST_INT_P (op1)
3462 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3463 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3464 &inner_mode)
3465 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3466 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3467 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3468 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3469 && subreg_lowpart_p (op0))
3471 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3472 + INTVAL (op1));
3473 tmp = simplify_gen_binary (code, inner_mode,
3474 XEXP (SUBREG_REG (op0), 0),
3475 tmp);
3476 return lowpart_subreg (int_mode, tmp, inner_mode);
3479 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3481 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3482 if (val != INTVAL (op1))
3483 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3485 break;
3487 case ASHIFT:
3488 case SS_ASHIFT:
3489 case US_ASHIFT:
3490 if (trueop1 == CONST0_RTX (mode))
3491 return op0;
3492 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3493 return op0;
3494 goto canonicalize_shift;
3496 case LSHIFTRT:
3497 if (trueop1 == CONST0_RTX (mode))
3498 return op0;
3499 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3500 return op0;
3501 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3502 if (GET_CODE (op0) == CLZ
3503 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3504 && CONST_INT_P (trueop1)
3505 && STORE_FLAG_VALUE == 1
3506 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3508 unsigned HOST_WIDE_INT zero_val = 0;
3510 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3511 && zero_val == GET_MODE_PRECISION (inner_mode)
3512 && INTVAL (trueop1) == exact_log2 (zero_val))
3513 return simplify_gen_relational (EQ, mode, inner_mode,
3514 XEXP (op0, 0), const0_rtx);
3516 goto canonicalize_shift;
3518 case SMIN:
3519 if (HWI_COMPUTABLE_MODE_P (mode)
3520 && mode_signbit_p (mode, trueop1)
3521 && ! side_effects_p (op0))
3522 return op1;
3523 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3524 return op0;
3525 tem = simplify_associative_operation (code, mode, op0, op1);
3526 if (tem)
3527 return tem;
3528 break;
3530 case SMAX:
3531 if (HWI_COMPUTABLE_MODE_P (mode)
3532 && CONST_INT_P (trueop1)
3533 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3534 && ! side_effects_p (op0))
3535 return op1;
3536 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3537 return op0;
3538 tem = simplify_associative_operation (code, mode, op0, op1);
3539 if (tem)
3540 return tem;
3541 break;
3543 case UMIN:
3544 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3545 return op1;
3546 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3547 return op0;
3548 tem = simplify_associative_operation (code, mode, op0, op1);
3549 if (tem)
3550 return tem;
3551 break;
3553 case UMAX:
3554 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3555 return op1;
3556 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3557 return op0;
3558 tem = simplify_associative_operation (code, mode, op0, op1);
3559 if (tem)
3560 return tem;
3561 break;
3563 case SS_PLUS:
3564 case US_PLUS:
3565 case SS_MINUS:
3566 case US_MINUS:
3567 case SS_MULT:
3568 case US_MULT:
3569 case SS_DIV:
3570 case US_DIV:
3571 /* ??? There are simplifications that can be done. */
3572 return 0;
3574 case VEC_SERIES:
3575 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3576 return gen_vec_duplicate (mode, op0);
3577 return 0;
3579 case VEC_SELECT:
3580 if (!VECTOR_MODE_P (mode))
3582 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3583 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3584 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3585 gcc_assert (XVECLEN (trueop1, 0) == 1);
3586 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3588 if (vec_duplicate_p (trueop0, &elt0))
3589 return elt0;
3591 if (GET_CODE (trueop0) == CONST_VECTOR)
3592 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3593 (trueop1, 0, 0)));
3595 /* Extract a scalar element from a nested VEC_SELECT expression
3596 (with an optional nested VEC_CONCAT expression). Some targets
3597 (i386) extract a scalar element from a vector using a chain of
3598 nested VEC_SELECT expressions. When the input operand is a memory
3599 operand, this operation can be simplified to a simple scalar
3600 load from an offset memory address. */
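/* An illustrative shape of such a chain:
   (vec_select:SI
   (vec_select:V2SI (mem:V4SI addr) (parallel [2 3]))
   (parallel [1]))
   which ultimately selects element 3 of the V4SI memory operand. */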
3601 if (GET_CODE (trueop0) == VEC_SELECT)
3603 rtx op0 = XEXP (trueop0, 0);
3604 rtx op1 = XEXP (trueop0, 1);
3606 machine_mode opmode = GET_MODE (op0);
3607 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3608 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3610 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3611 int elem;
3613 rtvec vec;
3614 rtx tmp_op, tmp;
3616 gcc_assert (GET_CODE (op1) == PARALLEL);
3617 gcc_assert (i < n_elts);
3619 /* Select the element pointed to by the nested selector. */
3620 elem = INTVAL (XVECEXP (op1, 0, i));
3622 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3623 if (GET_CODE (op0) == VEC_CONCAT)
3625 rtx op00 = XEXP (op0, 0);
3626 rtx op01 = XEXP (op0, 1);
3628 machine_mode mode00, mode01;
3629 int n_elts00, n_elts01;
3631 mode00 = GET_MODE (op00);
3632 mode01 = GET_MODE (op01);
3634 /* Find out the number of elements of each operand. */
3635 if (VECTOR_MODE_P (mode00))
3637 elt_size = GET_MODE_UNIT_SIZE (mode00);
3638 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3640 else
3641 n_elts00 = 1;
3643 if (VECTOR_MODE_P (mode01))
3645 elt_size = GET_MODE_UNIT_SIZE (mode01);
3646 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3648 else
3649 n_elts01 = 1;
3651 gcc_assert (n_elts == n_elts00 + n_elts01);
3653 /* Select correct operand of VEC_CONCAT
3654 and adjust selector. */
3655 if (elem < n_elts00)
3656 tmp_op = op00;
3657 else
3659 tmp_op = op01;
3660 elem -= n_elts00;
3663 else
3664 tmp_op = op0;
3666 vec = rtvec_alloc (1);
3667 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3669 tmp = gen_rtx_fmt_ee (code, mode,
3670 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3671 return tmp;
3674 else
3676 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3677 gcc_assert (GET_MODE_INNER (mode)
3678 == GET_MODE_INNER (GET_MODE (trueop0)));
3679 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3681 if (vec_duplicate_p (trueop0, &elt0))
3682 /* It doesn't matter which elements are selected by trueop1,
3683 because they are all the same. */
3684 return gen_vec_duplicate (mode, elt0);
3686 if (GET_CODE (trueop0) == CONST_VECTOR)
3688 int elt_size = GET_MODE_UNIT_SIZE (mode);
3689 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3690 rtvec v = rtvec_alloc (n_elts);
3691 unsigned int i;
3693 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3694 for (i = 0; i < n_elts; i++)
3696 rtx x = XVECEXP (trueop1, 0, i);
3698 gcc_assert (CONST_INT_P (x));
3699 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3700 INTVAL (x));
3703 return gen_rtx_CONST_VECTOR (mode, v);
3706 /* Recognize the identity. */
3707 if (GET_MODE (trueop0) == mode)
3709 bool maybe_ident = true;
3710 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3712 rtx j = XVECEXP (trueop1, 0, i);
3713 if (!CONST_INT_P (j) || INTVAL (j) != i)
3715 maybe_ident = false;
3716 break;
3719 if (maybe_ident)
3720 return trueop0;
3723 /* If we build {a,b} then permute it, build the result directly. */
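/* E.g. (illustrative) picking indices 3 and 0 from
   (vec_concat (vec_concat a b) (vec_concat c d)) yields
   (vec_concat d a) directly, without building the wide vector. */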
3724 if (XVECLEN (trueop1, 0) == 2
3725 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3726 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3727 && GET_CODE (trueop0) == VEC_CONCAT
3728 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3729 && GET_MODE (XEXP (trueop0, 0)) == mode
3730 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3731 && GET_MODE (XEXP (trueop0, 1)) == mode)
3733 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3734 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3735 rtx subop0, subop1;
3737 gcc_assert (i0 < 4 && i1 < 4);
3738 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3739 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3741 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3744 if (XVECLEN (trueop1, 0) == 2
3745 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3746 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3747 && GET_CODE (trueop0) == VEC_CONCAT
3748 && GET_MODE (trueop0) == mode)
3750 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3751 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3752 rtx subop0, subop1;
3754 gcc_assert (i0 < 2 && i1 < 2);
3755 subop0 = XEXP (trueop0, i0);
3756 subop1 = XEXP (trueop0, i1);
3758 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3761 /* If we select one half of a vec_concat, return that. */
3762 if (GET_CODE (trueop0) == VEC_CONCAT
3763 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3765 rtx subop0 = XEXP (trueop0, 0);
3766 rtx subop1 = XEXP (trueop0, 1);
3767 machine_mode mode0 = GET_MODE (subop0);
3768 machine_mode mode1 = GET_MODE (subop1);
3769 int li = GET_MODE_UNIT_SIZE (mode0);
3770 int l0 = GET_MODE_SIZE (mode0) / li;
3771 int l1 = GET_MODE_SIZE (mode1) / li;
3772 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3773 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3775 bool success = true;
3776 for (int i = 1; i < l0; ++i)
3778 rtx j = XVECEXP (trueop1, 0, i);
3779 if (!CONST_INT_P (j) || INTVAL (j) != i)
3781 success = false;
3782 break;
3785 if (success)
3786 return subop0;
3788 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3790 bool success = true;
3791 for (int i = 1; i < l1; ++i)
3793 rtx j = XVECEXP (trueop1, 0, i);
3794 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3796 success = false;
3797 break;
3800 if (success)
3801 return subop1;
3806 if (XVECLEN (trueop1, 0) == 1
3807 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3808 && GET_CODE (trueop0) == VEC_CONCAT)
3810 rtx vec = trueop0;
3811 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3813 /* Try to find the element in the VEC_CONCAT. */
3814 while (GET_MODE (vec) != mode
3815 && GET_CODE (vec) == VEC_CONCAT)
3817 HOST_WIDE_INT vec_size;
3819 if (CONST_INT_P (XEXP (vec, 0)))
3821 /* vec_concat of two const_ints doesn't make sense with
3822 respect to modes. */
3823 if (CONST_INT_P (XEXP (vec, 1)))
3824 return 0;
3826 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3827 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3829 else
3830 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3832 if (offset < vec_size)
3833 vec = XEXP (vec, 0);
3834 else
3836 offset -= vec_size;
3837 vec = XEXP (vec, 1);
3839 vec = avoid_constant_pool_reference (vec);
3842 if (GET_MODE (vec) == mode)
3843 return vec;
3846 /* If we select elements in a vec_merge that all come from the same
3847 operand, select from that operand directly. */
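/* The third operand of a VEC_MERGE is a bitmask; element I of the result
   comes from the first input when bit I is set and from the second input
   otherwise. So if every index in the selector lands on the same side of
   the mask, the merge can be bypassed. */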
3848 if (GET_CODE (op0) == VEC_MERGE)
3850 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3851 if (CONST_INT_P (trueop02))
3853 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3854 bool all_operand0 = true;
3855 bool all_operand1 = true;
3856 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3858 rtx j = XVECEXP (trueop1, 0, i);
3859 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3860 all_operand1 = false;
3861 else
3862 all_operand0 = false;
3864 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3865 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3866 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3867 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3871 /* If we have two nested selects that are inverses of each
3872 other, replace them with the source operand. */
3873 if (GET_CODE (trueop0) == VEC_SELECT
3874 && GET_MODE (XEXP (trueop0, 0)) == mode)
3876 rtx op0_subop1 = XEXP (trueop0, 1);
3877 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3878 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3880 /* Apply the outer ordering vector to the inner one. (The inner
3881 ordering vector is expressly permitted to be of a different
3882 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3883 then the two VEC_SELECTs cancel. */
3884 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3886 rtx x = XVECEXP (trueop1, 0, i);
3887 if (!CONST_INT_P (x))
3888 return 0;
3889 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3890 if (!CONST_INT_P (y) || i != INTVAL (y))
3891 return 0;
3893 return XEXP (trueop0, 0);
3896 return 0;
3897 case VEC_CONCAT:
3899 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3900 ? GET_MODE (trueop0)
3901 : GET_MODE_INNER (mode));
3902 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3903 ? GET_MODE (trueop1)
3904 : GET_MODE_INNER (mode));
3906 gcc_assert (VECTOR_MODE_P (mode));
3907 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3908 == GET_MODE_SIZE (mode));
3910 if (VECTOR_MODE_P (op0_mode))
3911 gcc_assert (GET_MODE_INNER (mode)
3912 == GET_MODE_INNER (op0_mode));
3913 else
3914 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3916 if (VECTOR_MODE_P (op1_mode))
3917 gcc_assert (GET_MODE_INNER (mode)
3918 == GET_MODE_INNER (op1_mode));
3919 else
3920 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3922 if ((GET_CODE (trueop0) == CONST_VECTOR
3923 || CONST_SCALAR_INT_P (trueop0)
3924 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3925 && (GET_CODE (trueop1) == CONST_VECTOR
3926 || CONST_SCALAR_INT_P (trueop1)
3927 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3929 int elt_size = GET_MODE_UNIT_SIZE (mode);
3930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3931 rtvec v = rtvec_alloc (n_elts);
3932 unsigned int i;
3933 unsigned in_n_elts = 1;
3935 if (VECTOR_MODE_P (op0_mode))
3936 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3937 for (i = 0; i < n_elts; i++)
3939 if (i < in_n_elts)
3941 if (!VECTOR_MODE_P (op0_mode))
3942 RTVEC_ELT (v, i) = trueop0;
3943 else
3944 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3946 else
3948 if (!VECTOR_MODE_P (op1_mode))
3949 RTVEC_ELT (v, i) = trueop1;
3950 else
3951 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3952 i - in_n_elts);
3956 return gen_rtx_CONST_VECTOR (mode, v);
3959 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3960 Restrict the transformation to avoid generating a VEC_SELECT with a
3961 mode unrelated to its operand. */
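/* E.g. (illustrative) concatenating two selects from the same V4SI
   source X:
   (vec_concat (vec_select X (parallel [0 2]))
   (vec_select X (parallel [1 3])))
   becomes (vec_select X (parallel [0 2 1 3])). */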
3962 if (GET_CODE (trueop0) == VEC_SELECT
3963 && GET_CODE (trueop1) == VEC_SELECT
3964 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3965 && GET_MODE (XEXP (trueop0, 0)) == mode)
3967 rtx par0 = XEXP (trueop0, 1);
3968 rtx par1 = XEXP (trueop1, 1);
3969 int len0 = XVECLEN (par0, 0);
3970 int len1 = XVECLEN (par1, 0);
3971 rtvec vec = rtvec_alloc (len0 + len1);
3972 for (int i = 0; i < len0; i++)
3973 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3974 for (int i = 0; i < len1; i++)
3975 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3976 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3977 gen_rtx_PARALLEL (VOIDmode, vec));
3980 return 0;
3982 default:
3983 gcc_unreachable ();
3986 if (mode == GET_MODE (op0)
3987 && mode == GET_MODE (op1)
3988 && vec_duplicate_p (op0, &elt0)
3989 && vec_duplicate_p (op1, &elt1))
3991 /* Try applying the operator to ELT and see if that simplifies.
3992 We can duplicate the result if so.
3994 The reason we don't use simplify_gen_binary is that it isn't
3995 necessarily a win to convert things like:
3997 (plus:V (vec_duplicate:V (reg:S R1))
3998 (vec_duplicate:V (reg:S R2)))
4000 to:
4002 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4004 The first might be done entirely in vector registers while the
4005 second might need a move between register files. */
4006 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4007 elt0, elt1);
4008 if (tem)
4009 return gen_vec_duplicate (mode, tem);
4012 return 0;
4015 rtx
4016 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4017 rtx op0, rtx op1)
4019 if (VECTOR_MODE_P (mode)
4020 && code != VEC_CONCAT
4021 && GET_CODE (op0) == CONST_VECTOR
4022 && GET_CODE (op1) == CONST_VECTOR)
4024 unsigned n_elts = GET_MODE_NUNITS (mode);
4025 machine_mode op0mode = GET_MODE (op0);
4026 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
4027 machine_mode op1mode = GET_MODE (op1);
4028 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
4029 rtvec v = rtvec_alloc (n_elts);
4030 unsigned int i;
4032 gcc_assert (op0_n_elts == n_elts);
4033 gcc_assert (op1_n_elts == n_elts);
4034 for (i = 0; i < n_elts; i++)
4036 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4037 CONST_VECTOR_ELT (op0, i),
4038 CONST_VECTOR_ELT (op1, i));
4039 if (!x)
4040 return 0;
4041 RTVEC_ELT (v, i) = x;
4044 return gen_rtx_CONST_VECTOR (mode, v);
4047 if (VECTOR_MODE_P (mode)
4048 && code == VEC_CONCAT
4049 && (CONST_SCALAR_INT_P (op0)
4050 || GET_CODE (op0) == CONST_FIXED
4051 || CONST_DOUBLE_AS_FLOAT_P (op0))
4052 && (CONST_SCALAR_INT_P (op1)
4053 || CONST_DOUBLE_AS_FLOAT_P (op1)
4054 || GET_CODE (op1) == CONST_FIXED))
4056 unsigned n_elts = GET_MODE_NUNITS (mode);
4057 rtvec v = rtvec_alloc (n_elts);
4059 gcc_assert (n_elts >= 2);
4060 if (n_elts == 2)
4062 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4063 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4065 RTVEC_ELT (v, 0) = op0;
4066 RTVEC_ELT (v, 1) = op1;
4068 else
4070 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
4071 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
4072 unsigned i;
4074 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4075 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4076 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4078 for (i = 0; i < op0_n_elts; ++i)
4079 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
4080 for (i = 0; i < op1_n_elts; ++i)
4081 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
4084 return gen_rtx_CONST_VECTOR (mode, v);
4087 if (SCALAR_FLOAT_MODE_P (mode)
4088 && CONST_DOUBLE_AS_FLOAT_P (op0)
4089 && CONST_DOUBLE_AS_FLOAT_P (op1)
4090 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4092 if (code == AND
4093 || code == IOR
4094 || code == XOR)
4096 long tmp0[4];
4097 long tmp1[4];
4098 REAL_VALUE_TYPE r;
4099 int i;
4101 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4102 GET_MODE (op0));
4103 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4104 GET_MODE (op1));
4105 for (i = 0; i < 4; i++)
4107 switch (code)
4109 case AND:
4110 tmp0[i] &= tmp1[i];
4111 break;
4112 case IOR:
4113 tmp0[i] |= tmp1[i];
4114 break;
4115 case XOR:
4116 tmp0[i] ^= tmp1[i];
4117 break;
4118 default:
4119 gcc_unreachable ();
4122 real_from_target (&r, tmp0, mode);
4123 return const_double_from_real_value (r, mode);
4125 else
4127 REAL_VALUE_TYPE f0, f1, value, result;
4128 const REAL_VALUE_TYPE *opr0, *opr1;
4129 bool inexact;
4131 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4132 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4134 if (HONOR_SNANS (mode)
4135 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4136 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4137 return 0;
4139 real_convert (&f0, mode, opr0);
4140 real_convert (&f1, mode, opr1);
4142 if (code == DIV
4143 && real_equal (&f1, &dconst0)
4144 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4145 return 0;
4147 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4148 && flag_trapping_math
4149 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4151 int s0 = REAL_VALUE_NEGATIVE (f0);
4152 int s1 = REAL_VALUE_NEGATIVE (f1);
4154 switch (code)
4156 case PLUS:
4157 /* Inf + -Inf = NaN plus exception. */
4158 if (s0 != s1)
4159 return 0;
4160 break;
4161 case MINUS:
4162 /* Inf - Inf = NaN plus exception. */
4163 if (s0 == s1)
4164 return 0;
4165 break;
4166 case DIV:
4167 /* Inf / Inf = NaN plus exception. */
4168 return 0;
4169 default:
4170 break;
4174 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4175 && flag_trapping_math
4176 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4177 || (REAL_VALUE_ISINF (f1)
4178 && real_equal (&f0, &dconst0))))
4179 /* Inf * 0 = NaN plus exception. */
4180 return 0;
4182 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4183 &f0, &f1);
4184 real_convert (&result, mode, &value);
4186 /* Don't constant fold this floating point operation if
4187 the result has overflowed and flag_trapping_math is set. */
4189 if (flag_trapping_math
4190 && MODE_HAS_INFINITIES (mode)
4191 && REAL_VALUE_ISINF (result)
4192 && !REAL_VALUE_ISINF (f0)
4193 && !REAL_VALUE_ISINF (f1))
4194 /* Overflow plus exception. */
4195 return 0;
4197 /* Don't constant fold this floating point operation if the
4198 result may depend upon the run-time rounding mode and
4199 flag_rounding_math is set, or if GCC's software emulation
4200 is unable to accurately represent the result. */
4202 if ((flag_rounding_math
4203 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4204 && (inexact || !real_identical (&result, &value)))
4205 return NULL_RTX;
4207 return const_double_from_real_value (result, mode);
4211 /* We can fold some multi-word operations. */
4212 scalar_int_mode int_mode;
4213 if (is_a <scalar_int_mode> (mode, &int_mode)
4214 && CONST_SCALAR_INT_P (op0)
4215 && CONST_SCALAR_INT_P (op1))
4217 wide_int result;
4218 bool overflow;
4219 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4220 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4222 #if TARGET_SUPPORTS_WIDE_INT == 0
4223 /* This assert keeps the simplification from producing a result
4224 that cannot be represented in a CONST_DOUBLE, but a lot of
4225 upstream callers expect that this function never fails to
4226 simplify something, so if you added this condition to the test
4227 above, the code would die later anyway. If this assert
4228 fires, you just need to make the port support wide int. */
4229 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4230 #endif
4231 switch (code)
4233 case MINUS:
4234 result = wi::sub (pop0, pop1);
4235 break;
4237 case PLUS:
4238 result = wi::add (pop0, pop1);
4239 break;
4241 case MULT:
4242 result = wi::mul (pop0, pop1);
4243 break;
4245 case DIV:
4246 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4247 if (overflow)
4248 return NULL_RTX;
4249 break;
4251 case MOD:
4252 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4253 if (overflow)
4254 return NULL_RTX;
4255 break;
4257 case UDIV:
4258 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4259 if (overflow)
4260 return NULL_RTX;
4261 break;
4263 case UMOD:
4264 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4265 if (overflow)
4266 return NULL_RTX;
4267 break;
4269 case AND:
4270 result = wi::bit_and (pop0, pop1);
4271 break;
4273 case IOR:
4274 result = wi::bit_or (pop0, pop1);
4275 break;
4277 case XOR:
4278 result = wi::bit_xor (pop0, pop1);
4279 break;
4281 case SMIN:
4282 result = wi::smin (pop0, pop1);
4283 break;
4285 case SMAX:
4286 result = wi::smax (pop0, pop1);
4287 break;
4289 case UMIN:
4290 result = wi::umin (pop0, pop1);
4291 break;
4293 case UMAX:
4294 result = wi::umax (pop0, pop1);
4295 break;
4297 case LSHIFTRT:
4298 case ASHIFTRT:
4299 case ASHIFT:
4301 wide_int wop1 = pop1;
4302 if (SHIFT_COUNT_TRUNCATED)
4303 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4304 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4305 return NULL_RTX;
4307 switch (code)
4309 case LSHIFTRT:
4310 result = wi::lrshift (pop0, wop1);
4311 break;
4313 case ASHIFTRT:
4314 result = wi::arshift (pop0, wop1);
4315 break;
4317 case ASHIFT:
4318 result = wi::lshift (pop0, wop1);
4319 break;
4321 default:
4322 gcc_unreachable ();
4324 break;
4326 case ROTATE:
4327 case ROTATERT:
4329 if (wi::neg_p (pop1))
4330 return NULL_RTX;
4332 switch (code)
4334 case ROTATE:
4335 result = wi::lrotate (pop0, pop1);
4336 break;
4338 case ROTATERT:
4339 result = wi::rrotate (pop0, pop1);
4340 break;
4342 default:
4343 gcc_unreachable ();
4345 break;
4347 default:
4348 return NULL_RTX;
4350 return immed_wide_int_const (result, int_mode);
4353 return NULL_RTX;
4358 /* Return a positive integer if X should sort after Y. The value
4359 returned is 1 if and only if X and Y are both regs. */
4361 static int
4362 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4364 int result;
4366 result = (commutative_operand_precedence (y)
4367 - commutative_operand_precedence (x));
4368 if (result)
4369 return result + result;
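/* The doubled value is always even, so only the REG/REG case below can
   return exactly 1. The caller uses that to tell a pure register swap
   apart from a real canonicalization. */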
4371 /* Group together equal REGs to do more simplification. */
4372 if (REG_P (x) && REG_P (y))
4373 return REGNO (x) > REGNO (y);
4375 return 0;
4378 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4379 operands may be another PLUS or MINUS.
4381 Rather than test for specific cases, we do this by a brute-force method
4382 and do all possible simplifications until no more changes occur. Then
4383 we rebuild the operation.
4385 May return NULL_RTX when no changes were made. */
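/* Outline of the approach (a descriptive note): the expression is first
   flattened into the OPS array as (operand, negate) pairs, the pairs are
   then simplified against each other until nothing more changes, and a
   chain of PLUS/MINUS is rebuilt from the survivors. For instance,
   (minus (plus a b) a) flattens to {+a, +b, -a}; a and -a cancel, and
   just b remains. */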
4387 static rtx
4388 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4389 rtx op1)
4391 struct simplify_plus_minus_op_data
4393 rtx op;
4394 short neg;
4395 } ops[16];
4396 rtx result, tem;
4397 int n_ops = 2;
4398 int changed, n_constants, canonicalized = 0;
4399 int i, j;
4401 memset (ops, 0, sizeof ops);
4403 /* Set up the two operands and then expand them until nothing has been
4404 changed. If we run out of room in our array, give up; this should
4405 almost never happen. */
4407 ops[0].op = op0;
4408 ops[0].neg = 0;
4409 ops[1].op = op1;
4410 ops[1].neg = (code == MINUS);
4414 changed = 0;
4415 n_constants = 0;
4417 for (i = 0; i < n_ops; i++)
4419 rtx this_op = ops[i].op;
4420 int this_neg = ops[i].neg;
4421 enum rtx_code this_code = GET_CODE (this_op);
4423 switch (this_code)
4425 case PLUS:
4426 case MINUS:
4427 if (n_ops == ARRAY_SIZE (ops))
4428 return NULL_RTX;
4430 ops[n_ops].op = XEXP (this_op, 1);
4431 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4432 n_ops++;
4434 ops[i].op = XEXP (this_op, 0);
4435 changed = 1;
4436 /* If this operand was negated then we will potentially
4437 canonicalize the expression. Similarly, if we don't
4438 place the operands adjacent, we're re-ordering the
4439 expression and thus might be performing a
4440 canonicalization. Ignore register re-ordering.
4441 ??? It might be better to shuffle the ops array here,
4442 but then (plus (plus (A, B), plus (C, D))) wouldn't
4443 be seen as non-canonical. */
4444 if (this_neg
4445 || (i != n_ops - 2
4446 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4447 canonicalized = 1;
4448 break;
4450 case NEG:
4451 ops[i].op = XEXP (this_op, 0);
4452 ops[i].neg = ! this_neg;
4453 changed = 1;
4454 canonicalized = 1;
4455 break;
4457 case CONST:
4458 if (n_ops != ARRAY_SIZE (ops)
4459 && GET_CODE (XEXP (this_op, 0)) == PLUS
4460 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4461 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4463 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4464 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4465 ops[n_ops].neg = this_neg;
4466 n_ops++;
4467 changed = 1;
4468 canonicalized = 1;
4470 break;
4472 case NOT:
4473 /* ~a -> (-a - 1) */
4474 if (n_ops != ARRAY_SIZE (ops))
4476 ops[n_ops].op = CONSTM1_RTX (mode);
4477 ops[n_ops++].neg = this_neg;
4478 ops[i].op = XEXP (this_op, 0);
4479 ops[i].neg = !this_neg;
4480 changed = 1;
4481 canonicalized = 1;
4483 break;
4485 case CONST_INT:
4486 n_constants++;
4487 if (this_neg)
4489 ops[i].op = neg_const_int (mode, this_op);
4490 ops[i].neg = 0;
4491 changed = 1;
4492 canonicalized = 1;
4494 break;
4496 default:
4497 break;
4501 while (changed);
4503 if (n_constants > 1)
4504 canonicalized = 1;
4506 gcc_assert (n_ops >= 2);
4508 /* If we only have two operands, we can avoid the loops. */
4509 if (n_ops == 2)
4511 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4512 rtx lhs, rhs;
4514 /* Get the two operands. Be careful with the order, especially for
4515 the cases where code == MINUS. */
4516 if (ops[0].neg && ops[1].neg)
4518 lhs = gen_rtx_NEG (mode, ops[0].op);
4519 rhs = ops[1].op;
4521 else if (ops[0].neg)
4523 lhs = ops[1].op;
4524 rhs = ops[0].op;
4526 else
4528 lhs = ops[0].op;
4529 rhs = ops[1].op;
4532 return simplify_const_binary_operation (code, mode, lhs, rhs);
4535 /* Now simplify each pair of operands until nothing changes. */
4536 while (1)
4538 /* Insertion sort is good enough for a small array. */
4539 for (i = 1; i < n_ops; i++)
4541 struct simplify_plus_minus_op_data save;
4542 int cmp;
4544 j = i - 1;
4545 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4546 if (cmp <= 0)
4547 continue;
4548 /* Just swapping registers doesn't count as canonicalization. */
4549 if (cmp != 1)
4550 canonicalized = 1;
4552 save = ops[i];
4554 ops[j + 1] = ops[j];
4555 while (j--
4556 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4557 ops[j + 1] = save;
4560 changed = 0;
4561 for (i = n_ops - 1; i > 0; i--)
4562 for (j = i - 1; j >= 0; j--)
4564 rtx lhs = ops[j].op, rhs = ops[i].op;
4565 int lneg = ops[j].neg, rneg = ops[i].neg;
4567 if (lhs != 0 && rhs != 0)
4569 enum rtx_code ncode = PLUS;
4571 if (lneg != rneg)
4573 ncode = MINUS;
4574 if (lneg)
4575 std::swap (lhs, rhs);
4577 else if (swap_commutative_operands_p (lhs, rhs))
4578 std::swap (lhs, rhs);
4580 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4581 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4583 rtx tem_lhs, tem_rhs;
4585 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4586 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4587 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4588 tem_rhs);
4590 if (tem && !CONSTANT_P (tem))
4591 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4593 else
4594 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4596 if (tem)
4598 /* Reject "simplifications" that just wrap the two
4599 arguments in a CONST. Failure to do so can result
4600 in infinite recursion with simplify_binary_operation
4601 when it calls us to simplify CONST operations.
4602 Also, if we find such a simplification, don't try
4603 any more combinations with this rhs: We must have
4604 something like symbol+offset, i.e. one of the
4605 trivial CONST expressions we handle later. */
4606 if (GET_CODE (tem) == CONST
4607 && GET_CODE (XEXP (tem, 0)) == ncode
4608 && XEXP (XEXP (tem, 0), 0) == lhs
4609 && XEXP (XEXP (tem, 0), 1) == rhs)
4610 break;
4611 lneg &= rneg;
4612 if (GET_CODE (tem) == NEG)
4613 tem = XEXP (tem, 0), lneg = !lneg;
4614 if (CONST_INT_P (tem) && lneg)
4615 tem = neg_const_int (mode, tem), lneg = 0;
4617 ops[i].op = tem;
4618 ops[i].neg = lneg;
4619 ops[j].op = NULL_RTX;
4620 changed = 1;
4621 canonicalized = 1;
4626 if (!changed)
4627 break;
4629 /* Pack all the operands to the lower-numbered entries. */
4630 for (i = 0, j = 0; j < n_ops; j++)
4631 if (ops[j].op)
4633 ops[i] = ops[j];
4634 i++;
4636 n_ops = i;
4639 /* If nothing changed, check whether rematerialization of rtl instructions
4640 is still required. */
4641 if (!canonicalized)
4643 /* Perform rematerialization only if all operands are registers and
4644 all operations are PLUS. */
4645 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4646 around rs6000 and how it uses the CA register. See PR67145. */
4647 for (i = 0; i < n_ops; i++)
4648 if (ops[i].neg
4649 || !REG_P (ops[i].op)
4650 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4651 && fixed_regs[REGNO (ops[i].op)]
4652 && !global_regs[REGNO (ops[i].op)]
4653 && ops[i].op != frame_pointer_rtx
4654 && ops[i].op != arg_pointer_rtx
4655 && ops[i].op != stack_pointer_rtx))
4656 return NULL_RTX;
4657 goto gen_result;
4660 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4661 if (n_ops == 2
4662 && CONST_INT_P (ops[1].op)
4663 && CONSTANT_P (ops[0].op)
4664 && ops[0].neg)
4665 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4667 /* We suppressed creation of trivial CONST expressions in the
4668 combination loop to avoid recursion. Create one manually now.
4669 The combination loop should have ensured that there is exactly
4670 one CONST_INT, and the sort will have ensured that it is last
4671 in the array and that any other constant will be next-to-last. */
4673 if (n_ops > 1
4674 && CONST_INT_P (ops[n_ops - 1].op)
4675 && CONSTANT_P (ops[n_ops - 2].op))
4677 rtx value = ops[n_ops - 1].op;
4678 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4679 value = neg_const_int (mode, value);
4680 if (CONST_INT_P (value))
4682 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4683 INTVAL (value));
4684 n_ops--;
4688 /* Put a non-negated operand first, if possible. */
4690 for (i = 0; i < n_ops && ops[i].neg; i++)
4691 continue;
4692 if (i == n_ops)
4693 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4694 else if (i != 0)
4696 tem = ops[0].op;
4697 ops[0] = ops[i];
4698 ops[i].op = tem;
4699 ops[i].neg = 1;
4702 /* Now make the result by performing the requested operations. */
4703 gen_result:
4704 result = ops[0].op;
4705 for (i = 1; i < n_ops; i++)
4706 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4707 mode, result, ops[i].op);
4709 return result;
4712 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4713 static bool
4714 plus_minus_operand_p (const_rtx x)
4716 return GET_CODE (x) == PLUS
4717 || GET_CODE (x) == MINUS
4718 || (GET_CODE (x) == CONST
4719 && GET_CODE (XEXP (x, 0)) == PLUS
4720 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4721 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4724 /* Like simplify_binary_operation except used for relational operators.
4725 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4726 not both be VOIDmode as well.
4728 CMP_MODE specifies the mode in which the comparison is done, so it is
4729 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4730 the operands or, if both are VOIDmode, the operands are compared in
4731 "infinite precision". */
4732 rtx
4733 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4734 machine_mode cmp_mode, rtx op0, rtx op1)
4736 rtx tem, trueop0, trueop1;
4738 if (cmp_mode == VOIDmode)
4739 cmp_mode = GET_MODE (op0);
4740 if (cmp_mode == VOIDmode)
4741 cmp_mode = GET_MODE (op1);
4743 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4744 if (tem)
4746 if (SCALAR_FLOAT_MODE_P (mode))
4748 if (tem == const0_rtx)
4749 return CONST0_RTX (mode);
4750 #ifdef FLOAT_STORE_FLAG_VALUE
4752 REAL_VALUE_TYPE val;
4753 val = FLOAT_STORE_FLAG_VALUE (mode);
4754 return const_double_from_real_value (val, mode);
4756 #else
4757 return NULL_RTX;
4758 #endif
4760 if (VECTOR_MODE_P (mode))
4762 if (tem == const0_rtx)
4763 return CONST0_RTX (mode);
4764 #ifdef VECTOR_STORE_FLAG_VALUE
4766 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4767 if (val == NULL_RTX)
4768 return NULL_RTX;
4769 if (val == const1_rtx)
4770 return CONST1_RTX (mode);
4772 return gen_const_vec_duplicate (mode, val);
4774 #else
4775 return NULL_RTX;
4776 #endif
4779 return tem;
4782 /* For the following tests, ensure const0_rtx is op1. */
4783 if (swap_commutative_operands_p (op0, op1)
4784 || (op0 == const0_rtx && op1 != const0_rtx))
4785 std::swap (op0, op1), code = swap_condition (code);
4787 /* If op0 is a compare, extract the comparison arguments from it. */
4788 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4789 return simplify_gen_relational (code, mode, VOIDmode,
4790 XEXP (op0, 0), XEXP (op0, 1));
4792 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4793 || CC0_P (op0))
4794 return NULL_RTX;
4796 trueop0 = avoid_constant_pool_reference (op0);
4797 trueop1 = avoid_constant_pool_reference (op1);
4798 return simplify_relational_operation_1 (code, mode, cmp_mode,
4799 trueop0, trueop1);
4802 /* This part of simplify_relational_operation is only used when CMP_MODE
4803 is not in class MODE_CC (i.e. it is a real comparison).
4805 MODE is the mode of the result, while CMP_MODE specifies the mode in
4806 which the comparison is done, so it is the mode of the operands. */
4808 static rtx
4809 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4810 machine_mode cmp_mode, rtx op0, rtx op1)
4812 enum rtx_code op0code = GET_CODE (op0);
4814 if (op1 == const0_rtx && COMPARISON_P (op0))
4816 /* If op0 is a comparison, extract the comparison arguments
4817 from it. */
4818 if (code == NE)
4820 if (GET_MODE (op0) == mode)
4821 return simplify_rtx (op0);
4822 else
4823 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4824 XEXP (op0, 0), XEXP (op0, 1));
4826 else if (code == EQ)
4828 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4829 if (new_code != UNKNOWN)
4830 return simplify_gen_relational (new_code, mode, VOIDmode,
4831 XEXP (op0, 0), XEXP (op0, 1));
4835 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4836 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4837 if ((code == LTU || code == GEU)
4838 && GET_CODE (op0) == PLUS
4839 && CONST_INT_P (XEXP (op0, 1))
4840 && (rtx_equal_p (op1, XEXP (op0, 0))
4841 || rtx_equal_p (op1, XEXP (op0, 1)))
4842 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4843 && XEXP (op0, 1) != const0_rtx)
4845 rtx new_cmp
4846 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4847 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4848 cmp_mode, XEXP (op0, 0), new_cmp);
4851 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4852 transformed into (LTU a -C). */
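/* E.g. (illustrative) (gtu (plus a (const_int 4)) (const_int 3)) becomes
   (ltu a (const_int -4)): the sum is at most 3 exactly when A is one of
   the four largest unsigned values, i.e. exactly when A >=u -4. */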
4853 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4854 && CONST_INT_P (XEXP (op0, 1))
4855 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4856 && XEXP (op0, 1) != const0_rtx)
4858 rtx new_cmp
4859 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4860 return simplify_gen_relational (LTU, mode, cmp_mode,
4861 XEXP (op0, 0), new_cmp);
4864 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4865 if ((code == LTU || code == GEU)
4866 && GET_CODE (op0) == PLUS
4867 && rtx_equal_p (op1, XEXP (op0, 1))
4868 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4869 && !rtx_equal_p (op1, XEXP (op0, 0)))
4870 return simplify_gen_relational (code, mode, cmp_mode, op0,
4871 copy_rtx (XEXP (op0, 0)));
4873 if (op1 == const0_rtx)
4875 /* Canonicalize (GTU x 0) as (NE x 0). */
4876 if (code == GTU)
4877 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4878 /* Canonicalize (LEU x 0) as (EQ x 0). */
4879 if (code == LEU)
4880 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4882 else if (op1 == const1_rtx)
4884 switch (code)
4886 case GE:
4887 /* Canonicalize (GE x 1) as (GT x 0). */
4888 return simplify_gen_relational (GT, mode, cmp_mode,
4889 op0, const0_rtx);
4890 case GEU:
4891 /* Canonicalize (GEU x 1) as (NE x 0). */
4892 return simplify_gen_relational (NE, mode, cmp_mode,
4893 op0, const0_rtx);
4894 case LT:
4895 /* Canonicalize (LT x 1) as (LE x 0). */
4896 return simplify_gen_relational (LE, mode, cmp_mode,
4897 op0, const0_rtx);
4898 case LTU:
4899 /* Canonicalize (LTU x 1) as (EQ x 0). */
4900 return simplify_gen_relational (EQ, mode, cmp_mode,
4901 op0, const0_rtx);
4902 default:
4903 break;
4906 else if (op1 == constm1_rtx)
4908 /* Canonicalize (LE x -1) as (LT x 0). */
4909 if (code == LE)
4910 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4911 /* Canonicalize (GT x -1) as (GE x 0). */
4912 if (code == GT)
4913 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4916 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4917 if ((code == EQ || code == NE)
4918 && (op0code == PLUS || op0code == MINUS)
4919 && CONSTANT_P (op1)
4920 && CONSTANT_P (XEXP (op0, 1))
4921 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4923 rtx x = XEXP (op0, 0);
4924 rtx c = XEXP (op0, 1);
4925 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4926 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4928 /* Detect an infinitely recursive condition, where we would oscillate at
4929 this simplification case between:
4930 A + B == C <---> C - B == A,
4931 where A, B, and C are all constant expressions that cannot be simplified
4932 further, usually SYMBOL_REFs. */
4933 if (GET_CODE (tem) == invcode
4934 && CONSTANT_P (x)
4935 && rtx_equal_p (c, XEXP (tem, 1)))
4936 return NULL_RTX;
4938 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4941 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4942 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4943 scalar_int_mode int_mode, int_cmp_mode;
4944 if (code == NE
4945 && op1 == const0_rtx
4946 && is_int_mode (mode, &int_mode)
4947 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4948 /* ??? Work-around BImode bugs in the ia64 backend. */
4949 && int_mode != BImode
4950 && int_cmp_mode != BImode
4951 && nonzero_bits (op0, int_cmp_mode) == 1
4952 && STORE_FLAG_VALUE == 1)
4953 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4954 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4955 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4957 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4958 if ((code == EQ || code == NE)
4959 && op1 == const0_rtx
4960 && op0code == XOR)
4961 return simplify_gen_relational (code, mode, cmp_mode,
4962 XEXP (op0, 0), XEXP (op0, 1));
4964 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4965 if ((code == EQ || code == NE)
4966 && op0code == XOR
4967 && rtx_equal_p (XEXP (op0, 0), op1)
4968 && !side_effects_p (XEXP (op0, 0)))
4969 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4970 CONST0_RTX (mode));
4972 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4973 if ((code == EQ || code == NE)
4974 && op0code == XOR
4975 && rtx_equal_p (XEXP (op0, 1), op1)
4976 && !side_effects_p (XEXP (op0, 1)))
4977 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4978 CONST0_RTX (mode));
4980 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4981 if ((code == EQ || code == NE)
4982 && op0code == XOR
4983 && CONST_SCALAR_INT_P (op1)
4984 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4985 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4986 simplify_gen_binary (XOR, cmp_mode,
4987 XEXP (op0, 1), op1));
4989 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4990 can be implemented with a BICS instruction on some targets, or
4991 constant-folded if y is a constant. */
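/* (X & Y) == X holds exactly when every bit of X outside Y is clear,
   i.e. when (~Y & X) == 0, which is the single AND-with-complement test
   the rewritten form exposes. */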
4992 if ((code == EQ || code == NE)
4993 && op0code == AND
4994 && rtx_equal_p (XEXP (op0, 0), op1)
4995 && !side_effects_p (op1)
4996 && op1 != CONST0_RTX (cmp_mode))
4998 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4999 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5001 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5002 CONST0_RTX (cmp_mode));
5005 /* Likewise for (eq/ne (and x y) y). */
5006 if ((code == EQ || code == NE)
5007 && op0code == AND
5008 && rtx_equal_p (XEXP (op0, 1), op1)
5009 && !side_effects_p (op1)
5010 && op1 != CONST0_RTX (cmp_mode))
5012 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
5013 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5015 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5016 CONST0_RTX (cmp_mode));
5019 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5020 if ((code == EQ || code == NE)
5021 && GET_CODE (op0) == BSWAP
5022 && CONST_SCALAR_INT_P (op1))
5023 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5024 simplify_gen_unary (BSWAP, cmp_mode,
5025 op1, cmp_mode));
5027 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5028 if ((code == EQ || code == NE)
5029 && GET_CODE (op0) == BSWAP
5030 && GET_CODE (op1) == BSWAP)
5031 return simplify_gen_relational (code, mode, cmp_mode,
5032 XEXP (op0, 0), XEXP (op1, 0));
5034 if (op0code == POPCOUNT && op1 == const0_rtx)
5035 switch (code)
5037 case EQ:
5038 case LE:
5039 case LEU:
5040 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5041 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5042 XEXP (op0, 0), const0_rtx);
5044 case NE:
5045 case GT:
5046 case GTU:
5047 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5048 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5049 XEXP (op0, 0), const0_rtx);
5051 default:
5052 break;
5055 return NULL_RTX;
5058 enum
5060 CMP_EQ = 1,
5061 CMP_LT = 2,
5062 CMP_GT = 4,
5063 CMP_LTU = 8,
5064 CMP_GTU = 16
5068 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5069 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5070 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5071 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5072 For floating-point comparisons, assume that the operands were ordered. */
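/* For example, a signed/unsigned "5 < 7" comparison would be summarized
   as CMP_LT | CMP_LTU, so LT, LE, NE, LTU and LEU map to const_true_rtx
   below, while EQ, GT, GE, GTU and GEU map to const0_rtx. */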
5074 static rtx
5075 comparison_result (enum rtx_code code, int known_results)
5077 switch (code)
5079 case EQ:
5080 case UNEQ:
5081 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5082 case NE:
5083 case LTGT:
5084 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5086 case LT:
5087 case UNLT:
5088 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5089 case GE:
5090 case UNGE:
5091 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5093 case GT:
5094 case UNGT:
5095 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5096 case LE:
5097 case UNLE:
5098 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5100 case LTU:
5101 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5102 case GEU:
5103 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5105 case GTU:
5106 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5107 case LEU:
5108 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5110 case ORDERED:
5111 return const_true_rtx;
5112 case UNORDERED:
5113 return const0_rtx;
5114 default:
5115 gcc_unreachable ();
5119 /* Check if the given comparison (done in the given MODE) is actually
5120 a tautology or a contradiction. If the mode is VOIDmode, the
5121 comparison is done in "infinite precision". If no simplification
5122 is possible, this function returns zero. Otherwise, it returns
5123 either const_true_rtx or const0_rtx. */
5125 rtx
5126 simplify_const_relational_operation (enum rtx_code code,
5127 machine_mode mode,
5128 rtx op0, rtx op1)
5130 rtx tem;
5131 rtx trueop0;
5132 rtx trueop1;
5134 gcc_assert (mode != VOIDmode
5135 || (GET_MODE (op0) == VOIDmode
5136 && GET_MODE (op1) == VOIDmode));
5138 /* If op0 is a compare, extract the comparison arguments from it. */
5139 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5141 op1 = XEXP (op0, 1);
5142 op0 = XEXP (op0, 0);
5144 if (GET_MODE (op0) != VOIDmode)
5145 mode = GET_MODE (op0);
5146 else if (GET_MODE (op1) != VOIDmode)
5147 mode = GET_MODE (op1);
5148 else
5149 return 0;
5152 /* We can't simplify MODE_CC values since we don't know what the
5153 actual comparison is. */
5154 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5155 return 0;
5157 /* Make sure the constant is second. */
5158 if (swap_commutative_operands_p (op0, op1))
5160 std::swap (op0, op1);
5161 code = swap_condition (code);
5164 trueop0 = avoid_constant_pool_reference (op0);
5165 trueop1 = avoid_constant_pool_reference (op1);
5167 /* For integer comparisons of A and B maybe we can simplify A - B and can
5168 then simplify a comparison of that with zero. If A and B are both either
5169 a register or a CONST_INT, this can't help; testing for these cases will
5170 prevent infinite recursion here and speed things up.
5172 We can only do this for EQ and NE comparisons as otherwise we may
5173 lose or introduce overflow which we cannot disregard as undefined as
5174 we do not know the signedness of the operation on either the left or
5175 the right hand side of the comparison. */
5177 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5178 && (code == EQ || code == NE)
5179 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5180 && (REG_P (op1) || CONST_INT_P (trueop1)))
5181 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5182 /* We cannot do this if tem is a nonzero address. */
5183 && ! nonzero_address_p (tem))
5184 return simplify_const_relational_operation (signed_condition (code),
5185 mode, tem, const0_rtx);
5187 if (! HONOR_NANS (mode) && code == ORDERED)
5188 return const_true_rtx;
5190 if (! HONOR_NANS (mode) && code == UNORDERED)
5191 return const0_rtx;
5193 /* For modes without NaNs, if the two operands are equal, we know the
5194 result except if they have side-effects. Even with NaNs we know
5195 the result of unordered comparisons and, if signaling NaNs are
5196 irrelevant, also the result of LT/GT/LTGT. */
5197 if ((! HONOR_NANS (trueop0)
5198 || code == UNEQ || code == UNLE || code == UNGE
5199 || ((code == LT || code == GT || code == LTGT)
5200 && ! HONOR_SNANS (trueop0)))
5201 && rtx_equal_p (trueop0, trueop1)
5202 && ! side_effects_p (trueop0))
5203 return comparison_result (code, CMP_EQ);
5205 /* If the operands are floating-point constants, see if we can fold
5206 the result. */
5207 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5208 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5209 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5211 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5212 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5214 /* Comparisons are unordered iff at least one of the values is NaN. */
5215 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5216 switch (code)
5218 case UNEQ:
5219 case UNLT:
5220 case UNGT:
5221 case UNLE:
5222 case UNGE:
5223 case NE:
5224 case UNORDERED:
5225 return const_true_rtx;
5226 case EQ:
5227 case LT:
5228 case GT:
5229 case LE:
5230 case GE:
5231 case LTGT:
5232 case ORDERED:
5233 return const0_rtx;
5234 default:
5235 return 0;
5238 return comparison_result (code,
5239 (real_equal (d0, d1) ? CMP_EQ :
5240 real_less (d0, d1) ? CMP_LT : CMP_GT));
5243 /* Otherwise, see if the operands are both integers. */
5244 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5245 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5247 /* It would be nice if we really had a mode here. However, the
5248 largest int representable on the target is as good as
5249 infinite. */
5250 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5251 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5252 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5254 if (wi::eq_p (ptrueop0, ptrueop1))
5255 return comparison_result (code, CMP_EQ);
5256 else
5258 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5259 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5260 return comparison_result (code, cr);
5264 /* Optimize comparisons with upper and lower bounds. */
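/* For instance, if nonzero_bits shows that only the low four bits of
   TRUEOP0 can ever be set, MMIN/MMAX become 0/15 and a comparison such as
   (gtu x (const_int 15)) folds to const0_rtx below. */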
5265 scalar_int_mode int_mode;
5266 if (CONST_INT_P (trueop1)
5267 && is_a <scalar_int_mode> (mode, &int_mode)
5268 && HWI_COMPUTABLE_MODE_P (int_mode)
5269 && !side_effects_p (trueop0))
5271 int sign;
5272 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5273 HOST_WIDE_INT val = INTVAL (trueop1);
5274 HOST_WIDE_INT mmin, mmax;
5276 if (code == GEU
5277 || code == LEU
5278 || code == GTU
5279 || code == LTU)
5280 sign = 0;
5281 else
5282 sign = 1;
5284 /* Get a reduced range if the sign bit is zero. */
5285 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5287 mmin = 0;
5288 mmax = nonzero;
5290 else
5292 rtx mmin_rtx, mmax_rtx;
5293 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5295 mmin = INTVAL (mmin_rtx);
5296 mmax = INTVAL (mmax_rtx);
5297 if (sign)
5299 unsigned int sign_copies
5300 = num_sign_bit_copies (trueop0, int_mode);
5302 mmin >>= (sign_copies - 1);
5303 mmax >>= (sign_copies - 1);
5307 switch (code)
5309 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5310 case GEU:
5311 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5312 return const_true_rtx;
5313 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5314 return const0_rtx;
5315 break;
5316 case GE:
5317 if (val <= mmin)
5318 return const_true_rtx;
5319 if (val > mmax)
5320 return const0_rtx;
5321 break;
5323 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5324 case LEU:
5325 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5326 return const_true_rtx;
5327 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5328 return const0_rtx;
5329 break;
5330 case LE:
5331 if (val >= mmax)
5332 return const_true_rtx;
5333 if (val < mmin)
5334 return const0_rtx;
5335 break;
5337 case EQ:
5338 /* x == y is always false for y out of range. */
5339 if (val < mmin || val > mmax)
5340 return const0_rtx;
5341 break;
5343 /* x > y is always false for y >= mmax, always true for y < mmin. */
5344 case GTU:
5345 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5346 return const0_rtx;
5347 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5348 return const_true_rtx;
5349 break;
5350 case GT:
5351 if (val >= mmax)
5352 return const0_rtx;
5353 if (val < mmin)
5354 return const_true_rtx;
5355 break;
5357 /* x < y is always false for y <= mmin, always true for y > mmax. */
5358 case LTU:
5359 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5360 return const0_rtx;
5361 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5362 return const_true_rtx;
5363 break;
5364 case LT:
5365 if (val <= mmin)
5366 return const0_rtx;
5367 if (val > mmax)
5368 return const_true_rtx;
5369 break;
5371 case NE:
5372 /* x != y is always true for y out of range. */
5373 if (val < mmin || val > mmax)
5374 return const_true_rtx;
5375 break;
5377 default:
5378 break;
5382 /* Optimize integer comparisons with zero. */
5383 if (is_a <scalar_int_mode> (mode, &int_mode)
5384 && trueop1 == const0_rtx
5385 && !side_effects_p (trueop0))
5387 /* Some addresses are known to be nonzero. We don't know
5388 their sign, but equality comparisons are known. */
5389 if (nonzero_address_p (trueop0))
5391 if (code == EQ || code == LEU)
5392 return const0_rtx;
5393 if (code == NE || code == GTU)
5394 return const_true_rtx;
5397 /* See if the first operand is an IOR with a constant. If so, we
5398 may be able to determine the result of this comparison. */
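/* A nonzero constant ORed into the value guarantees it is nonzero, and if
   that constant has the sign bit set the value is also known to be
   negative; either fact can settle the comparison with zero. */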
5399 if (GET_CODE (op0) == IOR)
5401 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5402 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5404 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5405 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5406 && (UINTVAL (inner_const)
5407 & (HOST_WIDE_INT_1U
5408 << sign_bitnum)));
5410 switch (code)
5412 case EQ:
5413 case LEU:
5414 return const0_rtx;
5415 case NE:
5416 case GTU:
5417 return const_true_rtx;
5418 case LT:
5419 case LE:
5420 if (has_sign)
5421 return const_true_rtx;
5422 break;
5423 case GT:
5424 case GE:
5425 if (has_sign)
5426 return const0_rtx;
5427 break;
5428 default:
5429 break;
5435 /* Optimize comparison of ABS with zero. */
5436 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5437 && (GET_CODE (trueop0) == ABS
5438 || (GET_CODE (trueop0) == FLOAT_EXTEND
5439 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5441 switch (code)
5443 case LT:
5444 /* Optimize abs(x) < 0.0. */
5445 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5446 return const0_rtx;
5447 break;
5449 case GE:
5450 /* Optimize abs(x) >= 0.0. */
5451 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5452 return const_true_rtx;
5453 break;
5455 case UNGE:
5456 /* Optimize ! (abs(x) < 0.0). */
5457 return const_true_rtx;
5459 default:
5460 break;
5464 return 0;
5467 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5468 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5469 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5470 can be simplified to that, or NULL_RTX if not.
5471 Assume X is compared against zero with CMP_CODE and the true
5472 arm is TRUE_VAL and the false arm is FALSE_VAL. */
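/* For example (illustrative, assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode), (eq x 0) ? (const_int 32) : (clz:SI x) collapses to
   (clz:SI x), because the constant arm already equals the value CLZ is
   defined to produce at zero. */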
5474 static rtx
5475 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5477 if (cmp_code != EQ && cmp_code != NE)
5478 return NULL_RTX;
5480 /* Result on X == 0 and X !=0 respectively. */
5481 rtx on_zero, on_nonzero;
5482 if (cmp_code == EQ)
5484 on_zero = true_val;
5485 on_nonzero = false_val;
5487 else
5489 on_zero = false_val;
5490 on_nonzero = true_val;
5493 rtx_code op_code = GET_CODE (on_nonzero);
5494 if ((op_code != CLZ && op_code != CTZ)
5495 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5496 || !CONST_INT_P (on_zero))
5497 return NULL_RTX;
5499 HOST_WIDE_INT op_val;
5500 scalar_int_mode mode ATTRIBUTE_UNUSED
5501 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5502 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5503 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5504 && op_val == INTVAL (on_zero))
5505 return on_nonzero;
5507 return NULL_RTX;
5511 /* Simplify CODE, an operation with result mode MODE and three operands,
5512 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5513 a constant. Return 0 if no simplification is possible. */
5515 rtx
5516 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5517 machine_mode op0_mode, rtx op0, rtx op1,
5518 rtx op2)
5520 bool any_change = false;
5521 rtx tem, trueop2;
5522 scalar_int_mode int_mode, int_op0_mode;
5524 switch (code)
5526 case FMA:
5527 /* Simplify negations around the multiplication. */
5528 /* -a * -b + c => a * b + c. */
5529 if (GET_CODE (op0) == NEG)
5531 tem = simplify_unary_operation (NEG, mode, op1, mode);
5532 if (tem)
5533 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5535 else if (GET_CODE (op1) == NEG)
5537 tem = simplify_unary_operation (NEG, mode, op0, mode);
5538 if (tem)
5539 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5542 /* Canonicalize the two multiplication operands. */
5543 /* a * -b + c => -b * a + c. */
5544 if (swap_commutative_operands_p (op0, op1))
5545 std::swap (op0, op1), any_change = true;
5547 if (any_change)
5548 return gen_rtx_FMA (mode, op0, op1, op2);
5549 return NULL_RTX;
5551 case SIGN_EXTRACT:
5552 case ZERO_EXTRACT:
5553 if (CONST_INT_P (op0)
5554 && CONST_INT_P (op1)
5555 && CONST_INT_P (op2)
5556 && is_a <scalar_int_mode> (mode, &int_mode)
5557 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5558 && HWI_COMPUTABLE_MODE_P (int_mode))
5560 /* Extracting a bit-field from a constant. */
5561 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5562 HOST_WIDE_INT op1val = INTVAL (op1);
5563 HOST_WIDE_INT op2val = INTVAL (op2);
5564 if (!BITS_BIG_ENDIAN)
5565 val >>= op2val;
5566 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5567 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5568 else
5569 /* Not enough information to calculate the bit position. */
5570 break;
5572 if (HOST_BITS_PER_WIDE_INT != op1val)
5574 /* First zero-extend. */
5575 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5576 /* If desired, propagate sign bit. */
5577 if (code == SIGN_EXTRACT
5578 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5579 != 0)
5580 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5583 return gen_int_mode (val, int_mode);
5585 break;
5587 case IF_THEN_ELSE:
5588 if (CONST_INT_P (op0))
5589 return op0 != const0_rtx ? op1 : op2;
5591 /* Convert c ? a : a into "a". */
5592 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5593 return op1;
5595 /* Convert a != b ? a : b into "a". */
5596 if (GET_CODE (op0) == NE
5597 && ! side_effects_p (op0)
5598 && ! HONOR_NANS (mode)
5599 && ! HONOR_SIGNED_ZEROS (mode)
5600 && ((rtx_equal_p (XEXP (op0, 0), op1)
5601 && rtx_equal_p (XEXP (op0, 1), op2))
5602 || (rtx_equal_p (XEXP (op0, 0), op2)
5603 && rtx_equal_p (XEXP (op0, 1), op1))))
5604 return op1;
5606 /* Convert a == b ? a : b into "b". */
5607 if (GET_CODE (op0) == EQ
5608 && ! side_effects_p (op0)
5609 && ! HONOR_NANS (mode)
5610 && ! HONOR_SIGNED_ZEROS (mode)
5611 && ((rtx_equal_p (XEXP (op0, 0), op1)
5612 && rtx_equal_p (XEXP (op0, 1), op2))
5613 || (rtx_equal_p (XEXP (op0, 0), op2)
5614 && rtx_equal_p (XEXP (op0, 1), op1))))
5615 return op2;
5617 /* Convert (!c) != {0,...,0} ? a : b into
5618 c != {0,...,0} ? b : a for vector modes. */
5619 if (VECTOR_MODE_P (GET_MODE (op1))
5620 && GET_CODE (op0) == NE
5621 && GET_CODE (XEXP (op0, 0)) == NOT
5622 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5624 rtx cv = XEXP (op0, 1);
5625 int nunits = CONST_VECTOR_NUNITS (cv);
5626 bool ok = true;
5627 for (int i = 0; i < nunits; ++i)
5628 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5630 ok = false;
5631 break;
5633 if (ok)
5635 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5636 XEXP (XEXP (op0, 0), 0),
5637 XEXP (op0, 1));
5638 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5639 return retval;
5643 /* Convert x == 0 ? N : clz (x) into clz (x) when
5644 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5645 Similarly for ctz (x). */
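/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
SImode (a hypothetical value), (if_then_else (eq x (const_int 0))
(const_int 32) (clz:SI x)) folds to (clz:SI x). */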
5646 if (COMPARISON_P (op0) && !side_effects_p (op0)
5647 && XEXP (op0, 1) == const0_rtx)
5649 rtx simplified
5650 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5651 op1, op2);
5652 if (simplified)
5653 return simplified;
5656 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5658 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5659 ? GET_MODE (XEXP (op0, 1))
5660 : GET_MODE (XEXP (op0, 0)));
5661 rtx temp;
5663 /* Look for store-flag constants (STORE_FLAG_VALUE and zero) in op1 and op2. */
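/* E.g. with STORE_FLAG_VALUE == 1 (the common value), (if_then_else
(lt a b) (const_int 1) (const_int 0)) becomes (lt a b); with the arms
swapped it becomes the reversed comparison (ge a b). */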
5664 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5666 HOST_WIDE_INT t = INTVAL (op1);
5667 HOST_WIDE_INT f = INTVAL (op2);
5669 if (t == STORE_FLAG_VALUE && f == 0)
5670 code = GET_CODE (op0);
5671 else if (t == 0 && f == STORE_FLAG_VALUE)
5673 enum rtx_code tmp;
5674 tmp = reversed_comparison_code (op0, NULL);
5675 if (tmp == UNKNOWN)
5676 break;
5677 code = tmp;
5679 else
5680 break;
5682 return simplify_gen_relational (code, mode, cmp_mode,
5683 XEXP (op0, 0), XEXP (op0, 1));
5686 if (cmp_mode == VOIDmode)
5687 cmp_mode = op0_mode;
5688 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5689 cmp_mode, XEXP (op0, 0),
5690 XEXP (op0, 1));
5692 /* See if any simplifications were possible. */
5693 if (temp)
5695 if (CONST_INT_P (temp))
5696 return temp == const0_rtx ? op2 : op1;
5697 else if (temp)
5698 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5701 break;
5703 case VEC_MERGE:
5704 gcc_assert (GET_MODE (op0) == mode);
5705 gcc_assert (GET_MODE (op1) == mode);
5706 gcc_assert (VECTOR_MODE_P (mode));
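/* Bit I of the VEC_MERGE selector chooses element I from OP0 when set and
from OP1 when clear; e.g. with four elements, a selector of 0b0101 takes
elements 0 and 2 from OP0 and elements 1 and 3 from OP1. */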
5707 trueop2 = avoid_constant_pool_reference (op2);
5708 if (CONST_INT_P (trueop2))
5710 int elt_size = GET_MODE_UNIT_SIZE (mode);
5711 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5712 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5713 unsigned HOST_WIDE_INT mask;
5714 if (n_elts == HOST_BITS_PER_WIDE_INT)
5715 mask = -1;
5716 else
5717 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5719 if (!(sel & mask) && !side_effects_p (op0))
5720 return op1;
5721 if ((sel & mask) == mask && !side_effects_p (op1))
5722 return op0;
5724 rtx trueop0 = avoid_constant_pool_reference (op0);
5725 rtx trueop1 = avoid_constant_pool_reference (op1);
5726 if (GET_CODE (trueop0) == CONST_VECTOR
5727 && GET_CODE (trueop1) == CONST_VECTOR)
5729 rtvec v = rtvec_alloc (n_elts);
5730 unsigned int i;
5732 for (i = 0; i < n_elts; i++)
5733 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5734 ? CONST_VECTOR_ELT (trueop0, i)
5735 : CONST_VECTOR_ELT (trueop1, i));
5736 return gen_rtx_CONST_VECTOR (mode, v);
5739 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5740 if no element from a appears in the result. */
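/* E.g. with two elements, (vec_merge (vec_merge a b 1) c 2) takes no
element from a, so it is equivalent to (vec_merge b c 2). */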
5741 if (GET_CODE (op0) == VEC_MERGE)
5743 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5744 if (CONST_INT_P (tem))
5746 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5747 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5748 return simplify_gen_ternary (code, mode, mode,
5749 XEXP (op0, 1), op1, op2);
5750 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5751 return simplify_gen_ternary (code, mode, mode,
5752 XEXP (op0, 0), op1, op2);
5755 if (GET_CODE (op1) == VEC_MERGE)
5757 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5758 if (CONST_INT_P (tem))
5760 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5761 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5762 return simplify_gen_ternary (code, mode, mode,
5763 op0, XEXP (op1, 1), op2);
5764 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5765 return simplify_gen_ternary (code, mode, mode,
5766 op0, XEXP (op1, 0), op2);
5770 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5771 with a. */
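/* E.g. (vec_merge (vec_duplicate (vec_select a (parallel [2]))) a
(const_int 4)) is just a: only element 2 is taken from the duplicate,
and that element already equals element 2 of a. */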
5772 if (GET_CODE (op0) == VEC_DUPLICATE
5773 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5774 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5775 && GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))) == 1)
5777 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5778 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5780 if (XEXP (XEXP (op0, 0), 0) == op1
5781 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5782 return op1;
5787 if (rtx_equal_p (op0, op1)
5788 && !side_effects_p (op2) && !side_effects_p (op1))
5789 return op0;
5791 break;
5793 default:
5794 gcc_unreachable ();
5797 return 0;
5800 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5801 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5802 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5804 Works by unpacking OP into a collection of 8-bit values
5805 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5806 and then repacking them again for OUTERMODE. */
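/* For instance, a SUBREG:SI of a CONST_DOUBLE:DF at byte 4 is handled by
expanding the eight bytes of the double's target representation, selecting
the requested four bytes (adjusted for endianness), and repacking them
into an SImode integer constant. */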
5808 static rtx
5809 simplify_immed_subreg (machine_mode outermode, rtx op,
5810 machine_mode innermode, unsigned int byte)
5812 enum {
5813 value_bit = 8,
5814 value_mask = (1 << value_bit) - 1
5816 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5817 int value_start;
5818 int i;
5819 int elem;
5821 int num_elem;
5822 rtx * elems;
5823 int elem_bitsize;
5824 rtx result_s = NULL;
5825 rtvec result_v = NULL;
5826 enum mode_class outer_class;
5827 scalar_mode outer_submode;
5828 int max_bitsize;
5830 /* Some ports misuse CCmode. */
5831 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5832 return op;
5834 /* We have no way to represent a complex constant at the rtl level. */
5835 if (COMPLEX_MODE_P (outermode))
5836 return NULL_RTX;
5838 /* We support any size mode. */
5839 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5840 GET_MODE_BITSIZE (innermode));
5842 /* Unpack the value. */
5844 if (GET_CODE (op) == CONST_VECTOR)
5846 num_elem = CONST_VECTOR_NUNITS (op);
5847 elems = &CONST_VECTOR_ELT (op, 0);
5848 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5850 else
5852 num_elem = 1;
5853 elems = &op;
5854 elem_bitsize = max_bitsize;
5856 /* If this asserts, it is too complicated; reducing value_bit may help. */
5857 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5858 /* I don't know how to handle endianness of sub-units. */
5859 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5861 for (elem = 0; elem < num_elem; elem++)
5863 unsigned char * vp;
5864 rtx el = elems[elem];
5866 /* Vectors are kept in target memory order. (This is probably
5867 a mistake.) */
5869 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5870 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5871 / BITS_PER_UNIT);
5872 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5873 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5874 unsigned bytele = (subword_byte % UNITS_PER_WORD
5875 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5876 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5879 switch (GET_CODE (el))
5881 case CONST_INT:
5882 for (i = 0;
5883 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5884 i += value_bit)
5885 *vp++ = INTVAL (el) >> i;
5886 /* CONST_INTs are always logically sign-extended. */
5887 for (; i < elem_bitsize; i += value_bit)
5888 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5889 break;
5891 case CONST_WIDE_INT:
5893 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5894 unsigned char extend = wi::sign_mask (val);
5895 int prec = wi::get_precision (val);
5897 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5898 *vp++ = wi::extract_uhwi (val, i, value_bit);
5899 for (; i < elem_bitsize; i += value_bit)
5900 *vp++ = extend;
5902 break;
5904 case CONST_DOUBLE:
5905 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5907 unsigned char extend = 0;
5908 /* If this triggers, someone should have generated a
5909 CONST_INT instead. */
5910 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5912 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5913 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5914 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5916 *vp++
5917 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5918 i += value_bit;
5921 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5922 extend = -1;
5923 for (; i < elem_bitsize; i += value_bit)
5924 *vp++ = extend;
5926 else
5928 /* This is big enough for anything on the platform. */
5929 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5930 scalar_float_mode el_mode;
5932 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5933 int bitsize = GET_MODE_BITSIZE (el_mode);
5935 gcc_assert (bitsize <= elem_bitsize);
5936 gcc_assert (bitsize % value_bit == 0);
5938 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5939 GET_MODE (el));
5941 /* real_to_target produces its result in words affected by
5942 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5943 and use WORDS_BIG_ENDIAN instead; see the documentation
5944 of SUBREG in rtl.texi. */
5945 for (i = 0; i < bitsize; i += value_bit)
5947 int ibase;
5948 if (WORDS_BIG_ENDIAN)
5949 ibase = bitsize - 1 - i;
5950 else
5951 ibase = i;
5952 *vp++ = tmp[ibase / 32] >> i % 32;
5955 /* It shouldn't matter what's done here, so fill it with
5956 zero. */
5957 for (; i < elem_bitsize; i += value_bit)
5958 *vp++ = 0;
5960 break;
5962 case CONST_FIXED:
5963 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5965 for (i = 0; i < elem_bitsize; i += value_bit)
5966 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5968 else
5970 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5971 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5972 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5973 i += value_bit)
5974 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5975 >> (i - HOST_BITS_PER_WIDE_INT);
5976 for (; i < elem_bitsize; i += value_bit)
5977 *vp++ = 0;
5979 break;
5981 default:
5982 gcc_unreachable ();
5986 /* Now, pick the right byte to start with. */
5987 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5988 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5989 will already have offset 0. */
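/* For example, on a fully big-endian target with 8-byte words (a
hypothetical configuration), the HImode subreg at byte 6 of a DImode
value covers the two least-significant bytes, so BYTE is renumbered
to 0 here. */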
5990 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5992 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5993 - byte);
5994 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5995 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5996 byte = (subword_byte % UNITS_PER_WORD
5997 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6000 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6001 so if it's become negative it will instead be very large.) */
6002 gcc_assert (byte < GET_MODE_SIZE (innermode));
6004 /* Convert from bytes to chunks of size value_bit. */
6005 value_start = byte * (BITS_PER_UNIT / value_bit);
6007 /* Re-pack the value. */
6008 num_elem = GET_MODE_NUNITS (outermode);
6010 if (VECTOR_MODE_P (outermode))
6012 result_v = rtvec_alloc (num_elem);
6013 elems = &RTVEC_ELT (result_v, 0);
6015 else
6016 elems = &result_s;
6018 outer_submode = GET_MODE_INNER (outermode);
6019 outer_class = GET_MODE_CLASS (outer_submode);
6020 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6022 gcc_assert (elem_bitsize % value_bit == 0);
6023 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6025 for (elem = 0; elem < num_elem; elem++)
6027 unsigned char *vp;
6029 /* Vectors are stored in target memory order. (This is probably
6030 a mistake.) */
6032 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6033 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6034 / BITS_PER_UNIT);
6035 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6036 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6037 unsigned bytele = (subword_byte % UNITS_PER_WORD
6038 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6039 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6042 switch (outer_class)
6044 case MODE_INT:
6045 case MODE_PARTIAL_INT:
6047 int u;
6048 int base = 0;
6049 int units
6050 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6051 / HOST_BITS_PER_WIDE_INT;
6052 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6053 wide_int r;
6055 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6056 return NULL_RTX;
6057 for (u = 0; u < units; u++)
6059 unsigned HOST_WIDE_INT buf = 0;
6060 for (i = 0;
6061 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6062 i += value_bit)
6063 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6065 tmp[u] = buf;
6066 base += HOST_BITS_PER_WIDE_INT;
6068 r = wide_int::from_array (tmp, units,
6069 GET_MODE_PRECISION (outer_submode));
6070 #if TARGET_SUPPORTS_WIDE_INT == 0
6071 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6072 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6073 return NULL_RTX;
6074 #endif
6075 elems[elem] = immed_wide_int_const (r, outer_submode);
6077 break;
6079 case MODE_FLOAT:
6080 case MODE_DECIMAL_FLOAT:
6082 REAL_VALUE_TYPE r;
6083 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6085 /* real_from_target wants its input in words affected by
6086 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6087 and use WORDS_BIG_ENDIAN instead; see the documentation
6088 of SUBREG in rtl.texi. */
6089 for (i = 0; i < elem_bitsize; i += value_bit)
6091 int ibase;
6092 if (WORDS_BIG_ENDIAN)
6093 ibase = elem_bitsize - 1 - i;
6094 else
6095 ibase = i;
6096 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6099 real_from_target (&r, tmp, outer_submode);
6100 elems[elem] = const_double_from_real_value (r, outer_submode);
6102 break;
6104 case MODE_FRACT:
6105 case MODE_UFRACT:
6106 case MODE_ACCUM:
6107 case MODE_UACCUM:
6109 FIXED_VALUE_TYPE f;
6110 f.data.low = 0;
6111 f.data.high = 0;
6112 f.mode = outer_submode;
6114 for (i = 0;
6115 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6116 i += value_bit)
6117 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6118 for (; i < elem_bitsize; i += value_bit)
6119 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6120 << (i - HOST_BITS_PER_WIDE_INT));
6122 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6124 break;
6126 default:
6127 gcc_unreachable ();
6130 if (VECTOR_MODE_P (outermode))
6131 return gen_rtx_CONST_VECTOR (outermode, result_v);
6132 else
6133 return result_s;
6136 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6137 Return 0 if no simplifications are possible. */
6139 simplify_subreg (machine_mode outermode, rtx op,
6140 machine_mode innermode, unsigned int byte)
6142 /* Little bit of sanity checking. */
6143 gcc_assert (innermode != VOIDmode);
6144 gcc_assert (outermode != VOIDmode);
6145 gcc_assert (innermode != BLKmode);
6146 gcc_assert (outermode != BLKmode);
6148 gcc_assert (GET_MODE (op) == innermode
6149 || GET_MODE (op) == VOIDmode);
6151 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6152 return NULL_RTX;
6154 if (byte >= GET_MODE_SIZE (innermode))
6155 return NULL_RTX;
6157 if (outermode == innermode && !byte)
6158 return op;
6160 if (byte % GET_MODE_UNIT_SIZE (innermode) == 0)
6162 rtx elt;
6164 if (VECTOR_MODE_P (outermode)
6165 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6166 && vec_duplicate_p (op, &elt))
6167 return gen_vec_duplicate (outermode, elt);
6169 if (outermode == GET_MODE_INNER (innermode)
6170 && vec_duplicate_p (op, &elt))
6171 return elt;
6174 if (CONST_SCALAR_INT_P (op)
6175 || CONST_DOUBLE_AS_FLOAT_P (op)
6176 || GET_CODE (op) == CONST_FIXED
6177 || GET_CODE (op) == CONST_VECTOR)
6178 return simplify_immed_subreg (outermode, op, innermode, byte);
6180 /* Changing mode twice with SUBREG => just change it once,
6181 or not at all if changing back to op's starting mode. */
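/* E.g. on a little-endian target, (subreg:QI (subreg:HI (reg:SI r) 0) 0)
becomes (subreg:QI (reg:SI r) 0), and (subreg:SI (subreg:HI (reg:SI r) 0) 0)
collapses back to (reg:SI r). */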
6182 if (GET_CODE (op) == SUBREG)
6184 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6185 rtx newx;
6187 if (outermode == innermostmode
6188 && byte == 0 && SUBREG_BYTE (op) == 0)
6189 return SUBREG_REG (op);
6191 /* Work out the memory offset of the final OUTERMODE value relative
6192 to the inner value of OP. */
6193 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6194 innermode, byte);
6195 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6196 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6198 /* See whether resulting subreg will be paradoxical. */
6199 if (!paradoxical_subreg_p (outermode, innermostmode))
6201 /* In nonparadoxical subregs we can't handle negative offsets. */
6202 if (final_offset < 0)
6203 return NULL_RTX;
6204 /* Bail out in case resulting subreg would be incorrect. */
6205 if (final_offset % GET_MODE_SIZE (outermode)
6206 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6207 return NULL_RTX;
6209 else
6211 HOST_WIDE_INT required_offset
6212 = subreg_memory_offset (outermode, innermostmode, 0);
6213 if (final_offset != required_offset)
6214 return NULL_RTX;
6215 /* Paradoxical subregs always have byte offset 0. */
6216 final_offset = 0;
6219 /* Recurse for further possible simplifications. */
6220 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6221 final_offset);
6222 if (newx)
6223 return newx;
6224 if (validate_subreg (outermode, innermostmode,
6225 SUBREG_REG (op), final_offset))
6227 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6228 if (SUBREG_PROMOTED_VAR_P (op)
6229 && SUBREG_PROMOTED_SIGN (op) >= 0
6230 && GET_MODE_CLASS (outermode) == MODE_INT
6231 && IN_RANGE (GET_MODE_SIZE (outermode),
6232 GET_MODE_SIZE (innermode),
6233 GET_MODE_SIZE (innermostmode))
6234 && subreg_lowpart_p (newx))
6236 SUBREG_PROMOTED_VAR_P (newx) = 1;
6237 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6239 return newx;
6241 return NULL_RTX;
6244 /* SUBREG of a hard register => just change the register number
6245 and/or mode. If the hard register is not valid in that mode,
6246 suppress this simplification. If the hard register is the stack,
6247 frame, or argument pointer, leave this as a SUBREG. */
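/* E.g. (subreg:SI (reg:DI 0) 4) can become (reg:SI 1) on a hypothetical
target where the DImode value in hard register 0 spans SImode hard
registers 0 and 1. */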
6249 if (REG_P (op) && HARD_REGISTER_P (op))
6251 unsigned int regno, final_regno;
6253 regno = REGNO (op);
6254 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6255 if (HARD_REGISTER_NUM_P (final_regno))
6257 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6258 subreg_memory_offset (outermode,
6259 innermode, byte));
6261 /* Propagate original regno. We don't have any way to specify
6262 the offset inside original regno, so do so only for lowpart.
6263 The information is used only by alias analysis, which cannot
6264 grok partial registers anyway. */
6266 if (subreg_lowpart_offset (outermode, innermode) == byte)
6267 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6268 return x;
6272 /* If we have a SUBREG of a register that we are replacing and we are
6273 replacing it with a MEM, make a new MEM and try replacing the
6274 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6275 or if we would be widening it. */
6277 if (MEM_P (op)
6278 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6279 /* Allow splitting of volatile memory references in case we don't
6280 have instruction to move the whole thing. */
6281 && (! MEM_VOLATILE_P (op)
6282 || ! have_insn_for (SET, innermode))
6283 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6284 return adjust_address_nv (op, outermode, byte);
6286 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6287 of two parts. */
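/* E.g. (subreg:SF (concat:SC re im) 4) reduces to im when SFmode is four
bytes wide, since byte 4 falls entirely within the second part. */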
6288 if (GET_CODE (op) == CONCAT
6289 || GET_CODE (op) == VEC_CONCAT)
6291 unsigned int part_size, final_offset;
6292 rtx part, res;
6294 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6295 if (part_mode == VOIDmode)
6296 part_mode = GET_MODE_INNER (GET_MODE (op));
6297 part_size = GET_MODE_SIZE (part_mode);
6298 if (byte < part_size)
6300 part = XEXP (op, 0);
6301 final_offset = byte;
6303 else
6305 part = XEXP (op, 1);
6306 final_offset = byte - part_size;
6309 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6310 return NULL_RTX;
6312 part_mode = GET_MODE (part);
6313 if (part_mode == VOIDmode)
6314 part_mode = GET_MODE_INNER (GET_MODE (op));
6315 res = simplify_subreg (outermode, part, part_mode, final_offset);
6316 if (res)
6317 return res;
6318 if (validate_subreg (outermode, part_mode, part, final_offset))
6319 return gen_rtx_SUBREG (outermode, part, final_offset);
6320 return NULL_RTX;
6323 /* A SUBREG resulting from a zero extension may fold to zero if
6324 it extracts bits above those provided by the ZERO_EXTEND's source. */
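/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI x)) 4)
reads bits 32..63, which the zero extension guarantees are zero, so it
folds to (const_int 0). */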
6325 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6327 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6328 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6329 return CONST0_RTX (outermode);
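/* A lowpart SUBREG to a narrower scalar integer mode acts like a TRUNCATE,
so try the truncation simplifications below; e.g. (subreg:HI
(zero_extend:SI (reg:HI x)) 0) should fold back to (reg:HI x) this way
(assuming byte 0 is the lowpart, as on little-endian targets). */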
6332 scalar_int_mode int_outermode, int_innermode;
6333 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6334 && is_a <scalar_int_mode> (innermode, &int_innermode)
6335 && (GET_MODE_PRECISION (int_outermode)
6336 < GET_MODE_PRECISION (int_innermode))
6337 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6339 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6340 if (tem)
6341 return tem;
6344 return NULL_RTX;
6347 /* Make a SUBREG operation or equivalent if it folds. */
6350 simplify_gen_subreg (machine_mode outermode, rtx op,
6351 machine_mode innermode, unsigned int byte)
6353 rtx newx;
6355 newx = simplify_subreg (outermode, op, innermode, byte);
6356 if (newx)
6357 return newx;
6359 if (GET_CODE (op) == SUBREG
6360 || GET_CODE (op) == CONCAT
6361 || GET_MODE (op) == VOIDmode)
6362 return NULL_RTX;
6364 if (validate_subreg (outermode, innermode, op, byte))
6365 return gen_rtx_SUBREG (outermode, op, byte);
6367 return NULL_RTX;
6370 /* Generates a subreg to get the least significant part of EXPR (in mode
6371 INNER_MODE) to OUTER_MODE. */
6374 lowpart_subreg (machine_mode outer_mode, rtx expr,
6375 machine_mode inner_mode)
6377 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6378 subreg_lowpart_offset (outer_mode, inner_mode));
6381 /* Simplify X, an rtx expression.
6383 Return the simplified expression or NULL if no simplifications
6384 were possible.
6386 This is the preferred entry point into the simplification routines;
6387 however, we still allow passes to call the more specific routines.
6389 Right now GCC has three (yes, three) major bodies of RTL simplification
6390 code that need to be unified.
6392 1. fold_rtx in cse.c. This code uses various CSE specific
6393 information to aid in RTL simplification.
6395 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6396 it uses combine specific information to aid in RTL
6397 simplification.
6399 3. The routines in this file.
6402 Long term we want to only have one body of simplification code; to
6403 get to that state I recommend the following steps:
6405 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6406 that do not depend on pass-specific state into these routines.
6408 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6409 use this routine whenever possible.
6411 3. Allow for pass dependent state to be provided to these
6412 routines and add simplifications based on the pass dependent
6413 state. Remove code from cse.c & combine.c that becomes
6414 redundant/dead.
6416 It will take time, but ultimately the compiler will be easier to
6417 maintain and improve. It's totally silly that when we add a
6418 simplification it needs to be added to 4 places (3 for RTL
6419 simplification and 1 for tree simplification). */
6422 simplify_rtx (const_rtx x)
6424 const enum rtx_code code = GET_CODE (x);
6425 const machine_mode mode = GET_MODE (x);
6427 switch (GET_RTX_CLASS (code))
6429 case RTX_UNARY:
6430 return simplify_unary_operation (code, mode,
6431 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6432 case RTX_COMM_ARITH:
6433 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6434 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6436 /* Fall through. */
6438 case RTX_BIN_ARITH:
6439 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6441 case RTX_TERNARY:
6442 case RTX_BITFIELD_OPS:
6443 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6444 XEXP (x, 0), XEXP (x, 1),
6445 XEXP (x, 2));
6447 case RTX_COMPARE:
6448 case RTX_COMM_COMPARE:
6449 return simplify_relational_operation (code, mode,
6450 ((GET_MODE (XEXP (x, 0))
6451 != VOIDmode)
6452 ? GET_MODE (XEXP (x, 0))
6453 : GET_MODE (XEXP (x, 1))),
6454 XEXP (x, 0),
6455 XEXP (x, 1));
6457 case RTX_EXTRA:
6458 if (code == SUBREG)
6459 return simplify_subreg (mode, SUBREG_REG (x),
6460 GET_MODE (SUBREG_REG (x)),
6461 SUBREG_BYTE (x));
6462 break;
6464 case RTX_OBJ:
6465 if (code == LO_SUM)
6467 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6468 if (GET_CODE (XEXP (x, 0)) == HIGH
6469 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6470 return XEXP (x, 1);
6472 break;
6474 default:
6475 break;
6477 return NULL;
6480 #if CHECKING_P
6482 namespace selftest {
6484 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6486 static rtx
6487 make_test_reg (machine_mode mode)
6489 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6491 return gen_rtx_REG (mode, test_reg_num++);
6494 /* Test vector simplifications involving VEC_DUPLICATE in which the
6495 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6496 register that holds one element of MODE. */
6498 static void
6499 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6501 scalar_mode inner_mode = GET_MODE_INNER (mode);
6502 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6503 unsigned int nunits = GET_MODE_NUNITS (mode);
6504 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6506 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6507 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6508 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6509 ASSERT_RTX_EQ (duplicate,
6510 simplify_unary_operation (NOT, mode,
6511 duplicate_not, mode));
6513 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6514 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6515 ASSERT_RTX_EQ (duplicate,
6516 simplify_unary_operation (NEG, mode,
6517 duplicate_neg, mode));
6519 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6520 ASSERT_RTX_EQ (duplicate,
6521 simplify_binary_operation (PLUS, mode, duplicate,
6522 CONST0_RTX (mode)));
6524 ASSERT_RTX_EQ (duplicate,
6525 simplify_binary_operation (MINUS, mode, duplicate,
6526 CONST0_RTX (mode)));
6528 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6529 simplify_binary_operation (MINUS, mode, duplicate,
6530 duplicate));
6533 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6534 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6535 ASSERT_RTX_PTR_EQ (scalar_reg,
6536 simplify_binary_operation (VEC_SELECT, inner_mode,
6537 duplicate, zero_par));
6539 /* And again with the final element. */
6540 rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
6541 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6542 ASSERT_RTX_PTR_EQ (scalar_reg,
6543 simplify_binary_operation (VEC_SELECT, inner_mode,
6544 duplicate, last_par));
6546 /* Test a scalar subreg of a VEC_DUPLICATE. */
6547 unsigned int offset = subreg_lowpart_offset (inner_mode, mode);
6548 ASSERT_RTX_EQ (scalar_reg,
6549 simplify_gen_subreg (inner_mode, duplicate,
6550 mode, offset));
6552 machine_mode narrower_mode;
6553 if (nunits > 2
6554 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6555 && VECTOR_MODE_P (narrower_mode))
6557 /* Test VEC_SELECT of a vector. */
6558 rtx vec_par
6559 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6560 rtx narrower_duplicate
6561 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6562 ASSERT_RTX_EQ (narrower_duplicate,
6563 simplify_binary_operation (VEC_SELECT, narrower_mode,
6564 duplicate, vec_par));
6566 /* Test a vector subreg of a VEC_DUPLICATE. */
6567 unsigned int offset = subreg_lowpart_offset (narrower_mode, mode);
6568 ASSERT_RTX_EQ (narrower_duplicate,
6569 simplify_gen_subreg (narrower_mode, duplicate,
6570 mode, offset));
6574 /* Test vector simplifications involving VEC_SERIES in which the
6575 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6576 register that holds one element of MODE. */
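/* A VEC_SERIES rtx (vec_series:M BASE STEP) represents the vector
{ BASE, BASE + STEP, BASE + 2 * STEP, ... }, so negating both the base
and the step negates every element; the tests below rely on that. */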
6578 static void
6579 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6581 /* Test unary cases with VEC_SERIES arguments. */
6582 scalar_mode inner_mode = GET_MODE_INNER (mode);
6583 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6584 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6585 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6586 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6587 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6588 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6589 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6590 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6591 neg_scalar_reg);
6592 ASSERT_RTX_EQ (series_0_r,
6593 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6594 ASSERT_RTX_EQ (series_r_m1,
6595 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6596 ASSERT_RTX_EQ (series_r_r,
6597 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6599 /* Test that a VEC_SERIES with a zero step is simplified away. */
6600 ASSERT_RTX_EQ (duplicate,
6601 simplify_binary_operation (VEC_SERIES, mode,
6602 scalar_reg, const0_rtx));
6604 /* Test PLUS and MINUS with VEC_SERIES. */
6605 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6606 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6607 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6608 ASSERT_RTX_EQ (series_r_r,
6609 simplify_binary_operation (PLUS, mode, series_0_r,
6610 duplicate));
6611 ASSERT_RTX_EQ (series_r_1,
6612 simplify_binary_operation (PLUS, mode, duplicate,
6613 series_0_1));
6614 ASSERT_RTX_EQ (series_r_m1,
6615 simplify_binary_operation (PLUS, mode, duplicate,
6616 series_0_m1));
6617 ASSERT_RTX_EQ (series_0_r,
6618 simplify_binary_operation (MINUS, mode, series_r_r,
6619 duplicate));
6620 ASSERT_RTX_EQ (series_r_m1,
6621 simplify_binary_operation (MINUS, mode, duplicate,
6622 series_0_1));
6623 ASSERT_RTX_EQ (series_r_1,
6624 simplify_binary_operation (MINUS, mode, duplicate,
6625 series_0_m1));
6628 /* Verify some simplifications involving vectors. */
6630 static void
6631 test_vector_ops ()
6633 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6635 machine_mode mode = (machine_mode) i;
6636 if (VECTOR_MODE_P (mode))
6638 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6639 test_vector_ops_duplicate (mode, scalar_reg);
6640 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6641 && GET_MODE_NUNITS (mode) > 2)
6642 test_vector_ops_series (mode, scalar_reg);
6647 /* Run all of the selftests within this file. */
6649 void
6650 simplify_rtx_c_tests ()
6652 test_vector_ops ();
6655 } // namespace selftest
6657 #endif /* CHECKING_P */