/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
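
/* Editorial example (not in the original source): on a 64-bit host,
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -11) evaluates to HOST_WIDE_INT_M1
   (an all-ones high word), while HWI_SIGN_EXTEND (5) evaluates to
   HOST_WIDE_INT_0, mirroring how sign extension would fill the high
   half of a (low, high) pair.  */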

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
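
/* Editorial note (not in the original source): the cast to unsigned
   HOST_WIDE_INT matters because negating the most negative value,
   e.g. INTVAL of QImode -128, overflows as a signed operation;
   negating the unsigned value and letting gen_int_mode truncate back
   to MODE yields the expected wrapped result instead.  */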

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
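
/* Editorial example (not in the original source): for SImode the only
   accepted value is the sign bit itself, so

     mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))

   is true, while the same call with const0_rtx or const1_rtx is false.  */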

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
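
/* Editorial worked example (not in the original source): for QImode,
   val_signbit_p requires the masked value to be exactly 0x80, whereas
   the known_set/known_clear variants only look at bit 7.  Hence
   val_signbit_known_set_p (QImode, 0x81) is true,
   val_signbit_p (QImode, 0x81) is false, and
   val_signbit_known_clear_p (QImode, 0x81) is false.  */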

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
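
/* Usage sketch (editorial addition, not part of the original source).
   The wrapper name below is hypothetical; the calls it makes are the
   entry points defined in this file.  The block is compiled out.  */
#if 0
static rtx
fold_const_plus_example (void)
{
  rtx a = gen_int_mode (2, SImode);
  rtx b = gen_int_mode (3, SImode);
  /* Both operands are CONST_INTs, so simplify_binary_operation folds
     the addition and (const_int 5) comes back.  With a non-constant
     operand no folding happens and a (plus:SI ...) rtx is generated,
     with the constant canonically placed second.  */
  return simplify_gen_binary (PLUS, SImode, a, b);
}
#endif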
197 /* If X is a MEM referencing the constant pool, return the real value.
198 Otherwise return X. */
200 avoid_constant_pool_reference (rtx x)
202 rtx c, tmp, addr;
203 machine_mode cmode;
204 HOST_WIDE_INT offset = 0;
206 switch (GET_CODE (x))
208 case MEM:
209 break;
211 case FLOAT_EXTEND:
212 /* Handle float extensions of constant pool references. */
213 tmp = XEXP (x, 0);
214 c = avoid_constant_pool_reference (tmp);
215 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
216 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
217 GET_MODE (x));
218 return x;
220 default:
221 return x;
224 if (GET_MODE (x) == BLKmode)
225 return x;
227 addr = XEXP (x, 0);
229 /* Call target hook to avoid the effects of -fpic etc.... */
230 addr = targetm.delegitimize_address (addr);
232 /* Split the address into a base and integer offset. */
233 if (GET_CODE (addr) == CONST
234 && GET_CODE (XEXP (addr, 0)) == PLUS
235 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
237 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
238 addr = XEXP (XEXP (addr, 0), 0);
241 if (GET_CODE (addr) == LO_SUM)
242 addr = XEXP (addr, 1);
244 /* If this is a constant pool reference, we can turn it into its
245 constant and hope that simplifications happen. */
246 if (GET_CODE (addr) == SYMBOL_REF
247 && CONSTANT_POOL_ADDRESS_P (addr))
249 c = get_pool_constant (addr);
250 cmode = get_pool_mode (addr);
252 /* If we're accessing the constant in a different mode than it was
253 originally stored, attempt to fix that up via subreg simplifications.
254 If that fails we have no choice but to return the original memory. */
255 if (offset == 0 && cmode == GET_MODE (x))
256 return c;
257 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
259 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
260 if (tem && CONSTANT_P (tem))
261 return tem;
265 return x;
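
/* Editorial example (not in the original source; the pool label is
   made up): if X is (mem/u/c:SF (symbol_ref/u:SI ("*.LC0"))) and the
   pool entry behind .LC0 holds the SFmode constant 1.0, the function
   returns that (const_double:SF 1.0) directly, letting callers fold
   arithmetic on it as if the constant had been written in place.  */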
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && VAR_P (decl)
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies mode comparison is done in. */
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419 X, if it returns non-NULL, replace X with its return value and simplify the
420 result. */
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
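
/* Usage sketch (editorial addition, not part of the original source).
   The function name below is hypothetical and the block is compiled
   out; it only illustrates how simplify_replace_rtx substitutes and
   then re-simplifies.  */
#if 0
static rtx
replace_reg_with_const_example (rtx expr, rtx reg)
{
  /* If EXPR is (plus:SI (reg:SI 100) (const_int 4)) and REG is
     (reg:SI 100), the substitution produces (plus:SI (const_int 3)
     (const_int 4)), which the simplify_gen_* machinery folds to
     (const_int 7).  */
  return simplify_replace_rtx (expr, reg, gen_int_mode (3, SImode));
}
#endif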
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
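
/* Editorial example (not in the original source): on such a target a
   caller that has proven the whole (and:DI (reg:DI X) (const_int 63))
   fits in SImode may use a lowpart subreg for the outer truncation,
   but this function must still write the operand truncations as
   simplify_gen_unary (TRUNCATE, SImode, XEXP (op, 0), DImode) and let
   simplify_unary_operation decide whether each of those can become a
   subreg.  */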
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
718 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
719 and C2. */
720 if (GET_CODE (op) == AND
721 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
722 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
723 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
724 && CONST_INT_P (XEXP (op, 1)))
726 rtx op0 = (XEXP (XEXP (op, 0), 0));
727 rtx shift_op = XEXP (XEXP (op, 0), 1);
728 rtx mask_op = XEXP (op, 1);
729 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
730 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 if (shift < precision
733 /* If doing this transform works for an X with all bits set,
734 it works for any X. */
735 && ((GET_MODE_MASK (mode) >> shift) & mask)
736 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
737 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
738 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
741 return simplify_gen_binary (AND, mode, op0, mask_op);
745 /* Recognize a word extraction from a multi-word subreg. */
746 if ((GET_CODE (op) == LSHIFTRT
747 || GET_CODE (op) == ASHIFTRT)
748 && SCALAR_INT_MODE_P (mode)
749 && SCALAR_INT_MODE_P (op_mode)
750 && precision >= BITS_PER_WORD
751 && 2 * precision <= op_precision
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
754 && UINTVAL (XEXP (op, 1)) < op_precision)
756 int byte = subreg_lowpart_offset (mode, op_mode);
757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
758 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
759 (WORDS_BIG_ENDIAN
760 ? byte - shifted_bytes
761 : byte + shifted_bytes));
764 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
765 and try replacing the TRUNCATE and shift with it. Don't do this
766 if the MEM has a mode-dependent address. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (op_mode)
770 && MEM_P (XEXP (op, 0))
771 && CONST_INT_P (XEXP (op, 1))
772 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
773 && INTVAL (XEXP (op, 1)) > 0
774 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
775 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
776 MEM_ADDR_SPACE (XEXP (op, 0)))
777 && ! MEM_VOLATILE_P (XEXP (op, 0))
778 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
779 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
781 int byte = subreg_lowpart_offset (mode, op_mode);
782 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
783 return adjust_address_nv (XEXP (op, 0), mode,
784 (WORDS_BIG_ENDIAN
785 ? byte - shifted_bytes
786 : byte + shifted_bytes));
789 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
790 (OP:SI foo:SI) if OP is NEG or ABS. */
791 if ((GET_CODE (op) == ABS
792 || GET_CODE (op) == NEG)
793 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
794 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
796 return simplify_gen_unary (GET_CODE (op), mode,
797 XEXP (XEXP (op, 0), 0), mode);
799 /* (truncate:A (subreg:B (truncate:C X) 0)) is
800 (truncate:A X). */
801 if (GET_CODE (op) == SUBREG
802 && SCALAR_INT_MODE_P (mode)
803 && SCALAR_INT_MODE_P (op_mode)
804 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
805 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
806 && subreg_lowpart_p (op))
808 rtx inner = XEXP (SUBREG_REG (op), 0);
809 if (GET_MODE_PRECISION (mode)
810 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
811 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
812 else
813 /* If subreg above is paradoxical and C is narrower
814 than A, return (subreg:A (truncate:C X) 0). */
815 return simplify_gen_subreg (mode, SUBREG_REG (op),
816 GET_MODE (SUBREG_REG (op)), 0);
819 /* (truncate:A (truncate:B X)) is (truncate:A X). */
820 if (GET_CODE (op) == TRUNCATE)
821 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 return NULL_RTX;
827 /* Try to simplify a unary operation CODE whose output mode is to be
828 MODE with input operand OP whose mode was originally OP_MODE.
829 Return zero if no simplification can be made. */
831 simplify_unary_operation (enum rtx_code code, machine_mode mode,
832 rtx op, machine_mode op_mode)
834 rtx trueop, tem;
836 trueop = avoid_constant_pool_reference (op);
838 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
839 if (tem)
840 return tem;
842 return simplify_unary_operation_1 (code, mode, op);
845 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
846 to be exact. */
848 static bool
849 exact_int_to_float_conversion_p (const_rtx op)
851 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
852 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
853 /* Constants shouldn't reach here. */
854 gcc_assert (op0_mode != VOIDmode);
855 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
856 int in_bits = in_prec;
857 if (HWI_COMPUTABLE_MODE_P (op0_mode))
859 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
860 if (GET_CODE (op) == FLOAT)
861 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
862 else if (GET_CODE (op) == UNSIGNED_FLOAT)
863 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
864 else
865 gcc_unreachable ();
866 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
868 return in_bits <= out_bits;
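
/* Editorial worked example (not in the original source): SFmode has a
   24-bit significand, so a conversion from SImode is known exact only
   when at most 24 significant bits remain after the analysis above;
   e.g. if nonzero_bits shows the operand fits in 16 bits, the function
   returns true, while an arbitrary 32-bit operand may round and the
   function returns false.  */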
871 /* Perform some simplifications we can do even if the operands
872 aren't constant. */
873 static rtx
874 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
876 enum rtx_code reversed;
877 rtx temp;
879 switch (code)
881 case NOT:
882 /* (not (not X)) == X. */
883 if (GET_CODE (op) == NOT)
884 return XEXP (op, 0);
886 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
887 comparison is all ones. */
888 if (COMPARISON_P (op)
889 && (mode == BImode || STORE_FLAG_VALUE == -1)
890 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
891 return simplify_gen_relational (reversed, mode, VOIDmode,
892 XEXP (op, 0), XEXP (op, 1));
894 /* (not (plus X -1)) can become (neg X). */
895 if (GET_CODE (op) == PLUS
896 && XEXP (op, 1) == constm1_rtx)
897 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
899 /* Similarly, (not (neg X)) is (plus X -1). */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
902 CONSTM1_RTX (mode));
904 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
905 if (GET_CODE (op) == XOR
906 && CONST_INT_P (XEXP (op, 1))
907 && (temp = simplify_unary_operation (NOT, mode,
908 XEXP (op, 1), mode)) != 0)
909 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
911 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
912 if (GET_CODE (op) == PLUS
913 && CONST_INT_P (XEXP (op, 1))
914 && mode_signbit_p (mode, XEXP (op, 1))
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1), mode)) != 0)
917 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
924 bother with. */
925 if (GET_CODE (op) == ASHIFT
926 && XEXP (op, 0) == const1_rtx)
928 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
929 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
933 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
934 so we can perform the above simplification. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_CODE (op) == ASHIFTRT
937 && CONST_INT_P (XEXP (op, 1))
938 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
939 return simplify_gen_relational (GE, mode, VOIDmode,
940 XEXP (op, 0), const0_rtx);
943 if (GET_CODE (op) == SUBREG
944 && subreg_lowpart_p (op)
945 && (GET_MODE_SIZE (GET_MODE (op))
946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
947 && GET_CODE (SUBREG_REG (op)) == ASHIFT
948 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
950 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
951 rtx x;
953 x = gen_rtx_ROTATE (inner_mode,
954 simplify_gen_unary (NOT, inner_mode, const1_rtx,
955 inner_mode),
956 XEXP (SUBREG_REG (op), 1));
957 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
958 if (temp)
959 return temp;
962 /* Apply De Morgan's laws to reduce number of patterns for machines
963 with negating logical insns (and-not, nand, etc.). If result has
964 only one NOT, put it first, since that is how the patterns are
965 coded. */
966 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
968 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
969 machine_mode op_mode;
971 op_mode = GET_MODE (in1);
972 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
974 op_mode = GET_MODE (in2);
975 if (op_mode == VOIDmode)
976 op_mode = mode;
977 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
979 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
980 std::swap (in1, in2);
982 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
983 mode, in1, in2);
986 /* (not (bswap x)) -> (bswap (not x)). */
987 if (GET_CODE (op) == BSWAP)
989 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990 return simplify_gen_unary (BSWAP, mode, x, mode);
992 break;
994 case NEG:
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
997 return XEXP (op, 0);
999 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1000 If comparison is not reversible use
1001 x ? y : (neg y). */
1002 if (GET_CODE (op) == IF_THEN_ELSE)
1004 rtx cond = XEXP (op, 0);
1005 rtx true_rtx = XEXP (op, 1);
1006 rtx false_rtx = XEXP (op, 2);
1008 if ((GET_CODE (true_rtx) == NEG
1009 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1010 || (GET_CODE (false_rtx) == NEG
1011 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1013 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1014 temp = reversed_comparison (cond, mode);
1015 else
1017 temp = cond;
1018 std::swap (true_rtx, false_rtx);
1020 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1021 mode, temp, true_rtx, false_rtx);
1025 /* (neg (plus X 1)) can become (not X). */
1026 if (GET_CODE (op) == PLUS
1027 && XEXP (op, 1) == const1_rtx)
1028 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1030 /* Similarly, (neg (not X)) is (plus X 1). */
1031 if (GET_CODE (op) == NOT)
1032 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1033 CONST1_RTX (mode));
1035 /* (neg (minus X Y)) can become (minus Y X). This transformation
1036 isn't safe for modes with signed zeros, since if X and Y are
1037 both +0, (minus Y X) is the same as (minus X Y). If the
1038 rounding mode is towards +infinity (or -infinity) then the two
1039 expressions will be rounded differently. */
1040 if (GET_CODE (op) == MINUS
1041 && !HONOR_SIGNED_ZEROS (mode)
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1045 if (GET_CODE (op) == PLUS
1046 && !HONOR_SIGNED_ZEROS (mode)
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1049 /* (neg (plus A C)) is simplified to (minus -C A). */
1050 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1051 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1054 if (temp)
1055 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1058 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1059 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1060 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1063 /* (neg (mult A B)) becomes (mult A (neg B)).
1064 This works even for floating-point values. */
1065 if (GET_CODE (op) == MULT
1066 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1069 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1072 /* NEG commutes with ASHIFT since it is multiplication. Only do
1073 this if we can then eliminate the NEG (e.g., if the operand
1074 is a constant). */
1075 if (GET_CODE (op) == ASHIFT)
1077 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1078 if (temp)
1079 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1082 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1083 C is equal to the width of MODE minus 1. */
1084 if (GET_CODE (op) == ASHIFTRT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1087 return simplify_gen_binary (LSHIFTRT, mode,
1088 XEXP (op, 0), XEXP (op, 1));
1090 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1091 C is equal to the width of MODE minus 1. */
1092 if (GET_CODE (op) == LSHIFTRT
1093 && CONST_INT_P (XEXP (op, 1))
1094 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1095 return simplify_gen_binary (ASHIFTRT, mode,
1096 XEXP (op, 0), XEXP (op, 1));
1098 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1099 if (GET_CODE (op) == XOR
1100 && XEXP (op, 1) == const1_rtx
1101 && nonzero_bits (XEXP (op, 0), mode) == 1)
1102 return plus_constant (mode, XEXP (op, 0), -1);
1104 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1105 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1106 if (GET_CODE (op) == LT
1107 && XEXP (op, 1) == const0_rtx
1108 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1110 machine_mode inner = GET_MODE (XEXP (op, 0));
1111 int isize = GET_MODE_PRECISION (inner);
1112 if (STORE_FLAG_VALUE == 1)
1114 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1115 GEN_INT (isize - 1));
1116 if (mode == inner)
1117 return temp;
1118 if (GET_MODE_PRECISION (mode) > isize)
1119 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1120 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1122 else if (STORE_FLAG_VALUE == -1)
1124 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1125 GEN_INT (isize - 1));
1126 if (mode == inner)
1127 return temp;
1128 if (GET_MODE_PRECISION (mode) > isize)
1129 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1130 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1133 break;
1135 case TRUNCATE:
1136 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1137 with the umulXi3_highpart patterns. */
1138 if (GET_CODE (op) == LSHIFTRT
1139 && GET_CODE (XEXP (op, 0)) == MULT)
1140 break;
1142 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1144 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1146 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1147 if (temp)
1148 return temp;
1150 /* We can't handle truncation to a partial integer mode here
1151 because we don't know the real bitsize of the partial
1152 integer mode. */
1153 break;
1156 if (GET_MODE (op) != VOIDmode)
1158 temp = simplify_truncation (mode, op, GET_MODE (op));
1159 if (temp)
1160 return temp;
1163 /* If we know that the value is already truncated, we can
1164 replace the TRUNCATE with a SUBREG. */
1165 if (GET_MODE_NUNITS (mode) == 1
1166 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1167 || truncated_to_mode (mode, op)))
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a comparison can be replaced with a subreg if
1175 STORE_FLAG_VALUE permits. This is like the previous test,
1176 but it works even if the comparison is done in a mode larger
1177 than HOST_BITS_PER_WIDE_INT. */
1178 if (HWI_COMPUTABLE_MODE_P (mode)
1179 && COMPARISON_P (op)
1180 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1187 /* A truncate of a memory is just loading the low part of the memory
1188 if we are not changing the meaning of the address. */
1189 if (GET_CODE (op) == MEM
1190 && !VECTOR_MODE_P (mode)
1191 && !MEM_VOLATILE_P (op)
1192 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1194 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1195 if (temp)
1196 return temp;
1199 break;
1201 case FLOAT_TRUNCATE:
1202 if (DECIMAL_FLOAT_MODE_P (mode))
1203 break;
1205 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1206 if (GET_CODE (op) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (op, 0)) == mode)
1208 return XEXP (op, 0);
1210 /* (float_truncate:SF (float_truncate:DF foo:XF))
1211 = (float_truncate:SF foo:XF).
1212 This may eliminate double rounding, so it is unsafe.
1214 (float_truncate:SF (float_extend:XF foo:DF))
1215 = (float_truncate:SF foo:DF).
1217 (float_truncate:DF (float_extend:XF foo:SF))
1218 = (float_extend:DF foo:SF). */
1219 if ((GET_CODE (op) == FLOAT_TRUNCATE
1220 && flag_unsafe_math_optimizations)
1221 || GET_CODE (op) == FLOAT_EXTEND)
1222 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1223 0)))
1224 > GET_MODE_SIZE (mode)
1225 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1226 mode,
1227 XEXP (op, 0), mode);
1229 /* (float_truncate (float x)) is (float x) */
1230 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1231 && (flag_unsafe_math_optimizations
1232 || exact_int_to_float_conversion_p (op)))
1233 return simplify_gen_unary (GET_CODE (op), mode,
1234 XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1237 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1238 (OP:SF foo:SF) if OP is NEG or ABS. */
1239 if ((GET_CODE (op) == ABS
1240 || GET_CODE (op) == NEG)
1241 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1242 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1243 return simplify_gen_unary (GET_CODE (op), mode,
1244 XEXP (XEXP (op, 0), 0), mode);
1246 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1247 is (float_truncate:SF x). */
1248 if (GET_CODE (op) == SUBREG
1249 && subreg_lowpart_p (op)
1250 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1251 return SUBREG_REG (op);
1252 break;
1254 case FLOAT_EXTEND:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
1264 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1265 && exact_int_to_float_conversion_p (op)))
1266 return simplify_gen_unary (GET_CODE (op), mode,
1267 XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case ABS:
1273 /* (abs (neg <foo>)) -> (abs <foo>) */
1274 if (GET_CODE (op) == NEG)
1275 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1279 do nothing. */
1280 if (GET_MODE (op) == VOIDmode)
1281 break;
1283 /* If operand is something known to be positive, ignore the ABS. */
1284 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1285 || val_signbit_known_clear_p (GET_MODE (op),
1286 nonzero_bits (op, GET_MODE (op))))
1287 return op;
1289 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1290 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1291 return gen_rtx_NEG (mode, op);
1293 break;
1295 case FFS:
1296 /* (ffs (*_extend <X>)) = (ffs <X>) */
1297 if (GET_CODE (op) == SIGN_EXTEND
1298 || GET_CODE (op) == ZERO_EXTEND)
1299 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 case POPCOUNT:
1304 switch (GET_CODE (op))
1306 case BSWAP:
1307 case ZERO_EXTEND:
1308 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1309 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1312 case ROTATE:
1313 case ROTATERT:
1314 /* Rotations don't affect popcount. */
1315 if (!side_effects_p (XEXP (op, 1)))
1316 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318 break;
1320 default:
1321 break;
1323 break;
1325 case PARITY:
1326 switch (GET_CODE (op))
1328 case NOT:
1329 case BSWAP:
1330 case ZERO_EXTEND:
1331 case SIGN_EXTEND:
1332 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1333 GET_MODE (XEXP (op, 0)));
1335 case ROTATE:
1336 case ROTATERT:
1337 /* Rotations don't affect parity. */
1338 if (!side_effects_p (XEXP (op, 1)))
1339 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 break;
1343 default:
1344 break;
1346 break;
1348 case BSWAP:
1349 /* (bswap (bswap x)) -> x. */
1350 if (GET_CODE (op) == BSWAP)
1351 return XEXP (op, 0);
1352 break;
1354 case FLOAT:
1355 /* (float (sign_extend <X>)) = (float <X>). */
1356 if (GET_CODE (op) == SIGN_EXTEND)
1357 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1358 GET_MODE (XEXP (op, 0)));
1359 break;
1361 case SIGN_EXTEND:
1362 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1363 becomes just the MINUS if its mode is MODE. This allows
1364 folding switch statements on machines using casesi (such as
1365 the VAX). */
1366 if (GET_CODE (op) == TRUNCATE
1367 && GET_MODE (XEXP (op, 0)) == mode
1368 && GET_CODE (XEXP (op, 0)) == MINUS
1369 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1370 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1371 return XEXP (op, 0);
1373 /* Extending a widening multiplication should be canonicalized to
1374 a wider widening multiplication. */
1375 if (GET_CODE (op) == MULT)
1377 rtx lhs = XEXP (op, 0);
1378 rtx rhs = XEXP (op, 1);
1379 enum rtx_code lcode = GET_CODE (lhs);
1380 enum rtx_code rcode = GET_CODE (rhs);
1382 /* Widening multiplies usually extend both operands, but sometimes
1383 they use a shift to extract a portion of a register. */
1384 if ((lcode == SIGN_EXTEND
1385 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1386 && (rcode == SIGN_EXTEND
1387 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1389 machine_mode lmode = GET_MODE (lhs);
1390 machine_mode rmode = GET_MODE (rhs);
1391 int bits;
1393 if (lcode == ASHIFTRT)
1394 /* Number of bits not shifted off the end. */
1395 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1396 else /* lcode == SIGN_EXTEND */
1397 /* Size of inner mode. */
1398 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1400 if (rcode == ASHIFTRT)
1401 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1402 else /* rcode == SIGN_EXTEND */
1403 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
1407 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1408 return simplify_gen_binary
1409 (MULT, mode,
1410 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1411 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
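
	      /* Editorial worked example (not in the original source):
		 when OP is (mult:DI (sign_extend:DI (reg:SI A))
		 (sign_extend:DI (reg:SI B))), each operand contributes
		 32 bits, so bits = 64, which is <= the DImode precision
		 of OP; a sign extension of OP to TImode is therefore
		 rewritten as (mult:TI (sign_extend:TI (reg:SI A))
		 (sign_extend:TI (reg:SI B))).  */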
1415 /* Check for a sign extension of a subreg of a promoted
1416 variable, where the promotion is sign-extended, and the
1417 target mode is the same as the variable's promotion. */
1418 if (GET_CODE (op) == SUBREG
1419 && SUBREG_PROMOTED_VAR_P (op)
1420 && SUBREG_PROMOTED_SIGNED_P (op)
1421 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1423 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1424 if (temp)
1425 return temp;
1428 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1429 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1430 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1432 gcc_assert (GET_MODE_PRECISION (mode)
1433 > GET_MODE_PRECISION (GET_MODE (op)));
1434 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1438 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (sign_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits.
1441 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1442 is similarly (zero_extend:M (subreg:O <X>)). */
1443 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1444 && GET_CODE (XEXP (op, 0)) == ASHIFT
1445 && CONST_INT_P (XEXP (op, 1))
1446 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1447 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1449 machine_mode tmode
1450 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1451 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1452 gcc_assert (GET_MODE_BITSIZE (mode)
1453 > GET_MODE_BITSIZE (GET_MODE (op)));
1454 if (tmode != BLKmode)
1456 rtx inner =
1457 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1458 if (inner)
1459 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1460 ? SIGN_EXTEND : ZERO_EXTEND,
1461 mode, inner, tmode);
1465 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1466 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1467 if (GET_CODE (op) == LSHIFTRT
1468 && CONST_INT_P (XEXP (op, 1))
1469 && XEXP (op, 1) != const0_rtx)
1470 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1472 #if defined(POINTERS_EXTEND_UNSIGNED)
1473 /* As we do not know which address space the pointer is referring to,
1474 we can do this only if the target does not support different pointer
1475 or address modes depending on the address space. */
1476 if (target_default_pointer_address_modes_p ()
1477 && ! POINTERS_EXTEND_UNSIGNED
1478 && mode == Pmode && GET_MODE (op) == ptr_mode
1479 && (CONSTANT_P (op)
1480 || (GET_CODE (op) == SUBREG
1481 && REG_P (SUBREG_REG (op))
1482 && REG_POINTER (SUBREG_REG (op))
1483 && GET_MODE (SUBREG_REG (op)) == Pmode))
1484 && !targetm.have_ptr_extend ())
1486 temp
1487 = convert_memory_address_addr_space_1 (Pmode, op,
1488 ADDR_SPACE_GENERIC, false,
1489 true);
1490 if (temp)
1491 return temp;
1493 #endif
1494 break;
1496 case ZERO_EXTEND:
1497 /* Check for a zero extension of a subreg of a promoted
1498 variable, where the promotion is zero-extended, and the
1499 target mode is the same as the variable's promotion. */
1500 if (GET_CODE (op) == SUBREG
1501 && SUBREG_PROMOTED_VAR_P (op)
1502 && SUBREG_PROMOTED_UNSIGNED_P (op)
1503 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1505 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1506 if (temp)
1507 return temp;
1510 /* Extending a widening multiplication should be canonicalized to
1511 a wider widening multiplication. */
1512 if (GET_CODE (op) == MULT)
1514 rtx lhs = XEXP (op, 0);
1515 rtx rhs = XEXP (op, 1);
1516 enum rtx_code lcode = GET_CODE (lhs);
1517 enum rtx_code rcode = GET_CODE (rhs);
1519 /* Widening multiplies usually extend both operands, but sometimes
1520 they use a shift to extract a portion of a register. */
1521 if ((lcode == ZERO_EXTEND
1522 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1523 && (rcode == ZERO_EXTEND
1524 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1526 machine_mode lmode = GET_MODE (lhs);
1527 machine_mode rmode = GET_MODE (rhs);
1528 int bits;
1530 if (lcode == LSHIFTRT)
1531 /* Number of bits not shifted off the end. */
1532 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1533 else /* lcode == ZERO_EXTEND */
1534 /* Size of inner mode. */
1535 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1537 if (rcode == LSHIFTRT)
1538 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1539 else /* rcode == ZERO_EXTEND */
1540 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
1544 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1545 return simplify_gen_binary
1546 (MULT, mode,
1547 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1548 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1552 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1553 if (GET_CODE (op) == ZERO_EXTEND)
1554 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1555 GET_MODE (XEXP (op, 0)));
1557 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1558 is (zero_extend:M (subreg:O <X>)) if there is mode with
1559 GET_MODE_PRECISION (N) - I bits. */
1560 if (GET_CODE (op) == LSHIFTRT
1561 && GET_CODE (XEXP (op, 0)) == ASHIFT
1562 && CONST_INT_P (XEXP (op, 1))
1563 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1564 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1566 machine_mode tmode
1567 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1568 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1569 if (tmode != BLKmode)
1571 rtx inner =
1572 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1573 if (inner)
1574 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1578 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1579 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1580 of mode N. E.g.
1581 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1582 (and:SI (reg:SI) (const_int 63)). */
1583 if (GET_CODE (op) == SUBREG
1584 && GET_MODE_PRECISION (GET_MODE (op))
1585 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1586 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1587 <= HOST_BITS_PER_WIDE_INT
1588 && GET_MODE_PRECISION (mode)
1589 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1590 && subreg_lowpart_p (op)
1591 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1592 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1594 if (GET_MODE_PRECISION (mode)
1595 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1596 return SUBREG_REG (op);
1597 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1598 GET_MODE (SUBREG_REG (op)));
1601 #if defined(POINTERS_EXTEND_UNSIGNED)
1602 /* As we do not know which address space the pointer is referring to,
1603 we can do this only if the target does not support different pointer
1604 or address modes depending on the address space. */
1605 if (target_default_pointer_address_modes_p ()
1606 && POINTERS_EXTEND_UNSIGNED > 0
1607 && mode == Pmode && GET_MODE (op) == ptr_mode
1608 && (CONSTANT_P (op)
1609 || (GET_CODE (op) == SUBREG
1610 && REG_P (SUBREG_REG (op))
1611 && REG_POINTER (SUBREG_REG (op))
1612 && GET_MODE (SUBREG_REG (op)) == Pmode))
1613 && !targetm.have_ptr_extend ())
1615 temp
1616 = convert_memory_address_addr_space_1 (Pmode, op,
1617 ADDR_SPACE_GENERIC, false,
1618 true);
1619 if (temp)
1620 return temp;
1622 #endif
1623 break;
1625 default:
1626 break;
1629 return 0;
1632 /* Try to compute the value of a unary operation CODE whose output mode is to
1633 be MODE with input operand OP whose mode was originally OP_MODE.
1634 Return zero if the value cannot be computed. */
1636 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1637 rtx op, machine_mode op_mode)
1639 unsigned int width = GET_MODE_PRECISION (mode);
1641 if (code == VEC_DUPLICATE)
1643 gcc_assert (VECTOR_MODE_P (mode));
1644 if (GET_MODE (op) != VOIDmode)
1646 if (!VECTOR_MODE_P (GET_MODE (op)))
1647 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1648 else
1649 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1650 (GET_MODE (op)));
1652 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1653 || GET_CODE (op) == CONST_VECTOR)
1655 int elt_size = GET_MODE_UNIT_SIZE (mode);
1656 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1657 rtvec v = rtvec_alloc (n_elts);
1658 unsigned int i;
1660 if (GET_CODE (op) != CONST_VECTOR)
1661 for (i = 0; i < n_elts; i++)
1662 RTVEC_ELT (v, i) = op;
1663 else
1665 machine_mode inmode = GET_MODE (op);
1666 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1667 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1669 gcc_assert (in_n_elts < n_elts);
1670 gcc_assert ((n_elts % in_n_elts) == 0);
1671 for (i = 0; i < n_elts; i++)
1672 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1674 return gen_rtx_CONST_VECTOR (mode, v);
1678 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1680 int elt_size = GET_MODE_UNIT_SIZE (mode);
1681 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1682 machine_mode opmode = GET_MODE (op);
1683 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1684 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1685 rtvec v = rtvec_alloc (n_elts);
1686 unsigned int i;
1688 gcc_assert (op_n_elts == n_elts);
1689 for (i = 0; i < n_elts; i++)
1691 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1692 CONST_VECTOR_ELT (op, i),
1693 GET_MODE_INNER (opmode));
1694 if (!x)
1695 return 0;
1696 RTVEC_ELT (v, i) = x;
1698 return gen_rtx_CONST_VECTOR (mode, v);
1701 /* The order of these tests is critical so that, for example, we don't
1702 check the wrong mode (input vs. output) for a conversion operation,
1703 such as FIX. At some point, this should be simplified. */
1705 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1707 REAL_VALUE_TYPE d;
1709 if (op_mode == VOIDmode)
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption, as CONST_INTs are often created and
	     used with garbage in the bits outside of the precision of
	     the implied mode of the const_int.  */
1716 op_mode = MAX_MODE_INT;
1719 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1721 /* Avoid the folding if flag_signaling_nans is on and
1722 operand is a signaling NaN. */
1723 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1724 return 0;
1726 d = real_value_truncate (mode, d);
1727 return const_double_from_real_value (d, mode);
1729 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1731 REAL_VALUE_TYPE d;
1733 if (op_mode == VOIDmode)
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption, as CONST_INTs are often created and
	     used with garbage in the bits outside of the precision of
	     the implied mode of the const_int.  */
1740 op_mode = MAX_MODE_INT;
1743 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1745 /* Avoid the folding if flag_signaling_nans is on and
1746 operand is a signaling NaN. */
1747 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1748 return 0;
1750 d = real_value_truncate (mode, d);
1751 return const_double_from_real_value (d, mode);
1754 if (CONST_SCALAR_INT_P (op) && width > 0)
1756 wide_int result;
1757 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1758 rtx_mode_t op0 = rtx_mode_t (op, imode);
1759 int int_value;
1761 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE.  A lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert fires, you
	 just need to make the port support wide int.  */
1768 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1769 #endif
1771 switch (code)
1773 case NOT:
1774 result = wi::bit_not (op0);
1775 break;
1777 case NEG:
1778 result = wi::neg (op0);
1779 break;
1781 case ABS:
1782 result = wi::abs (op0);
1783 break;
1785 case FFS:
1786 result = wi::shwi (wi::ffs (op0), mode);
1787 break;
1789 case CLZ:
1790 if (wi::ne_p (op0, 0))
1791 int_value = wi::clz (op0);
1792 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1793 int_value = GET_MODE_PRECISION (mode);
1794 result = wi::shwi (int_value, mode);
1795 break;
1797 case CLRSB:
1798 result = wi::shwi (wi::clrsb (op0), mode);
1799 break;
1801 case CTZ:
1802 if (wi::ne_p (op0, 0))
1803 int_value = wi::ctz (op0);
1804 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1805 int_value = GET_MODE_PRECISION (mode);
1806 result = wi::shwi (int_value, mode);
1807 break;
1809 case POPCOUNT:
1810 result = wi::shwi (wi::popcount (op0), mode);
1811 break;
1813 case PARITY:
1814 result = wi::shwi (wi::parity (op0), mode);
1815 break;
1817 case BSWAP:
1818 result = wide_int (op0).bswap ();
1819 break;
1821 case TRUNCATE:
1822 case ZERO_EXTEND:
1823 result = wide_int::from (op0, width, UNSIGNED);
1824 break;
1826 case SIGN_EXTEND:
1827 result = wide_int::from (op0, width, SIGNED);
1828 break;
1830 case SQRT:
1831 default:
1832 return 0;
1835 return immed_wide_int_const (result, mode);
1838 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1839 && SCALAR_FLOAT_MODE_P (mode)
1840 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1842 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1843 switch (code)
1845 case SQRT:
1846 return 0;
1847 case ABS:
1848 d = real_value_abs (&d);
1849 break;
1850 case NEG:
1851 d = real_value_negate (&d);
1852 break;
1853 case FLOAT_TRUNCATE:
1854 /* Don't perform the operation if flag_signaling_nans is on
1855 and the operand is a signaling NaN. */
1856 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1857 d = real_value_truncate (mode, d);
1858 break;
1859 case FLOAT_EXTEND:
1860 /* All this does is change the mode, unless the mode
1861 class changes. */
1862 /* Don't perform the operation if flag_signaling_nans is on
1863 and the operand is a signaling NaN. */
1864 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1865 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1866 real_convert (&d, mode, &d);
1867 break;
1868 case FIX:
1869 /* Don't perform the operation if flag_signaling_nans is on
1870 and the operand is a signaling NaN. */
1871 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1872 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1873 break;
1874 case NOT:
1876 long tmp[4];
1877 int i;
1879 real_to_target (tmp, &d, GET_MODE (op));
1880 for (i = 0; i < 4; i++)
1881 tmp[i] = ~tmp[i];
1882 real_from_target (&d, tmp, mode);
1883 break;
1885 default:
1886 gcc_unreachable ();
1888 return const_double_from_real_value (d, mode);
1890 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1891 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1892 && GET_MODE_CLASS (mode) == MODE_INT
1893 && width > 0)
1895 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1896 operators are intentionally left unspecified (to ease implementation
1897 by target backends), for consistency, this routine implements the
1898 same semantics for constant folding as used by the middle-end. */
1900 /* This was formerly used only for non-IEEE float.
1901 eggert@twinsun.com says it is safe for IEEE also. */
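/* For example, with these saturating semantics (fix:SI 3.5e9) folds
   to 2147483647, (unsigned_fix:SI -1.0) folds to 0, and NaN inputs
   fold to 0.  */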
1902 REAL_VALUE_TYPE t;
1903 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1904 wide_int wmax, wmin;
1905 /* This is part of the ABI of real_to_integer, but we check
1906 things before making this call. */
1907 bool fail;
1909 switch (code)
1911 case FIX:
1912 if (REAL_VALUE_ISNAN (*x))
1913 return const0_rtx;
1915 /* Test against the signed upper bound. */
1916 wmax = wi::max_value (width, SIGNED);
1917 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1918 if (real_less (&t, x))
1919 return immed_wide_int_const (wmax, mode);
1921 /* Test against the signed lower bound. */
1922 wmin = wi::min_value (width, SIGNED);
1923 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1924 if (real_less (x, &t))
1925 return immed_wide_int_const (wmin, mode);
1927 return immed_wide_int_const (real_to_integer (x, &fail, width),
1928 mode);
1930 case UNSIGNED_FIX:
1931 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1932 return const0_rtx;
1934 /* Test against the unsigned upper bound. */
1935 wmax = wi::max_value (width, UNSIGNED);
1936 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1937 if (real_less (&t, x))
1938 return immed_wide_int_const (wmax, mode);
1940 return immed_wide_int_const (real_to_integer (x, &fail, width),
1941 mode);
1943 default:
1944 gcc_unreachable ();
1948 return NULL_RTX;
1951 /* Subroutine of simplify_binary_operation to simplify a binary operation
1952 CODE that can commute with byte swapping, with result mode MODE and
1953 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1954 Return zero if no simplification or canonicalization is possible. */
1956 static rtx
1957 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1958 rtx op0, rtx op1)
1960 rtx tem;
1962 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
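/* For example, in SImode (and (bswap x) 0xff) becomes
   (bswap (and x 0xff000000)), since byte-swapping the constant moves
   its low byte to the top.  */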
1963 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1965 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1966 simplify_gen_unary (BSWAP, mode, op1, mode));
1967 return simplify_gen_unary (BSWAP, mode, tem, mode);
1970 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1971 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1973 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1974 return simplify_gen_unary (BSWAP, mode, tem, mode);
1977 return NULL_RTX;
1980 /* Subroutine of simplify_binary_operation to simplify a commutative,
1981 associative binary operation CODE with result mode MODE, operating
1982 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1983 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1984 canonicalization is possible. */
1986 static rtx
1987 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1988 rtx op0, rtx op1)
1990 rtx tem;
1992 /* Linearize the operator to the left. */
1993 if (GET_CODE (op1) == code)
1995 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1996 if (GET_CODE (op0) == code)
1998 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1999 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2002 /* "a op (b op c)" becomes "(b op c) op a". */
2003 if (! swap_commutative_operands_p (op1, op0))
2004 return simplify_gen_binary (code, mode, op1, op0);
2006 std::swap (op0, op1);
2009 if (GET_CODE (op0) == code)
2011 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2012 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2014 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2015 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2018 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
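/* For example, (plus (plus x (const_int 4)) (const_int 8)) folds its
   constants here and becomes (plus x (const_int 12)).  */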
2019 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2020 if (tem != 0)
2021 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2023 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2024 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2025 if (tem != 0)
2026 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2029 return 0;
2033 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2034 and OP1. Return 0 if no simplification is possible.
2036 Don't use this for relational operations such as EQ or LT.
2037 Use simplify_relational_operation instead. */
2039 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2040 rtx op0, rtx op1)
2042 rtx trueop0, trueop1;
2043 rtx tem;
2045 /* Relational operations don't work here. We must know the mode
2046 of the operands in order to do the comparison correctly.
2047 Assuming a full word can give incorrect results.
2048 Consider comparing 128 with -128 in QImode. */
2049 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2050 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2052 /* Make sure the constant is second. */
2053 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2054 && swap_commutative_operands_p (op0, op1))
2055 std::swap (op0, op1);
2057 trueop0 = avoid_constant_pool_reference (op0);
2058 trueop1 = avoid_constant_pool_reference (op1);
2060 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2061 if (tem)
2062 return tem;
2063 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2065 if (tem)
2066 return tem;
2068 /* If the above steps did not result in a simplification and op0 or op1
2069 were constant pool references, use the referenced constants directly. */
2070 if (trueop0 != op0 || trueop1 != op1)
2071 return simplify_gen_binary (code, mode, trueop0, trueop1);
2073 return NULL_RTX;
2076 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2077 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2078 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2079 actual constants. */
2081 static rtx
2082 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2083 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2085 rtx tem, reversed, opleft, opright;
2086 HOST_WIDE_INT val;
2087 unsigned int width = GET_MODE_PRECISION (mode);
2089 /* Even if we can't compute a constant result,
2090 there are some cases worth simplifying. */
2092 switch (code)
2094 case PLUS:
2095 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2096 when x is NaN, infinite, or finite and nonzero. They aren't
2097 when x is -0 and the rounding mode is not towards -infinity,
2098 since (-0) + 0 is then 0. */
2099 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2100 return op0;
2102 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2103 transformations are safe even for IEEE. */
2104 if (GET_CODE (op0) == NEG)
2105 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2106 else if (GET_CODE (op1) == NEG)
2107 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2109 /* (~a) + 1 -> -a */
2110 if (INTEGRAL_MODE_P (mode)
2111 && GET_CODE (op0) == NOT
2112 && trueop1 == const1_rtx)
2113 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2115 /* Handle both-operands-constant cases. We can only add
2116 CONST_INTs to constants since the sum of relocatable symbols
2117 can't be handled by most assemblers. Don't add CONST_INT
2118 to CONST_INT since overflow won't be computed properly if wider
2119 than HOST_BITS_PER_WIDE_INT. */
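/* For example, (plus (symbol_ref "foo") (const_int 4)) typically
   becomes the single constant
   (const (plus (symbol_ref "foo") (const_int 4))).  */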
2121 if ((GET_CODE (op0) == CONST
2122 || GET_CODE (op0) == SYMBOL_REF
2123 || GET_CODE (op0) == LABEL_REF)
2124 && CONST_INT_P (op1))
2125 return plus_constant (mode, op0, INTVAL (op1));
2126 else if ((GET_CODE (op1) == CONST
2127 || GET_CODE (op1) == SYMBOL_REF
2128 || GET_CODE (op1) == LABEL_REF)
2129 && CONST_INT_P (op0))
2130 return plus_constant (mode, op1, INTVAL (op0));
2132 /* See if this is something like X * C - X or vice versa or
2133 if the multiplication is written as a shift. If so, we can
2134 distribute and make a new multiply, shift, or maybe just
2135 have X (if C is 2 in the example above). But don't make
2136 something more expensive than we had before. */
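/* For example, x*3 + x distributes to x*4 and (x << 2) + x becomes
   x*5, provided the cost check below does not consider the new form
   more expensive.  */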
2138 if (SCALAR_INT_MODE_P (mode))
2140 rtx lhs = op0, rhs = op1;
2142 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2143 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2145 if (GET_CODE (lhs) == NEG)
2147 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2148 lhs = XEXP (lhs, 0);
2150 else if (GET_CODE (lhs) == MULT
2151 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2153 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2154 lhs = XEXP (lhs, 0);
2156 else if (GET_CODE (lhs) == ASHIFT
2157 && CONST_INT_P (XEXP (lhs, 1))
2158 && INTVAL (XEXP (lhs, 1)) >= 0
2159 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2161 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2162 GET_MODE_PRECISION (mode));
2163 lhs = XEXP (lhs, 0);
2166 if (GET_CODE (rhs) == NEG)
2168 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2169 rhs = XEXP (rhs, 0);
2171 else if (GET_CODE (rhs) == MULT
2172 && CONST_INT_P (XEXP (rhs, 1)))
2174 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2175 rhs = XEXP (rhs, 0);
2177 else if (GET_CODE (rhs) == ASHIFT
2178 && CONST_INT_P (XEXP (rhs, 1))
2179 && INTVAL (XEXP (rhs, 1)) >= 0
2180 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2182 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2183 GET_MODE_PRECISION (mode));
2184 rhs = XEXP (rhs, 0);
2187 if (rtx_equal_p (lhs, rhs))
2189 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2190 rtx coeff;
2191 bool speed = optimize_function_for_speed_p (cfun);
2193 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2195 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2196 return (set_src_cost (tem, mode, speed)
2197 <= set_src_cost (orig, mode, speed) ? tem : 0);
2201 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
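/* For example, in QImode (plus (xor x 0x0f) 0x80) becomes
   (xor x 0x8f): adding the sign bit only toggles the top bit (any
   carry is discarded), so it is equivalent to xoring it in.  */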
2202 if (CONST_SCALAR_INT_P (op1)
2203 && GET_CODE (op0) == XOR
2204 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2205 && mode_signbit_p (mode, op1))
2206 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2207 simplify_gen_binary (XOR, mode, op1,
2208 XEXP (op0, 1)));
2210 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2211 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2212 && GET_CODE (op0) == MULT
2213 && GET_CODE (XEXP (op0, 0)) == NEG)
2215 rtx in1, in2;
2217 in1 = XEXP (XEXP (op0, 0), 0);
2218 in2 = XEXP (op0, 1);
2219 return simplify_gen_binary (MINUS, mode, op1,
2220 simplify_gen_binary (MULT, mode,
2221 in1, in2));
2224 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2225 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2226 is 1. */
2227 if (COMPARISON_P (op0)
2228 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2229 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2230 && (reversed = reversed_comparison (op0, mode)))
2231 return
2232 simplify_gen_unary (NEG, mode, reversed, mode);
2234 /* If one of the operands is a PLUS or a MINUS, see if we can
2235 simplify this by the associative law.
2236 Don't use the associative law for floating point.
2237 The inaccuracy makes it nonassociative,
2238 and subtle programs can break if operations are associated. */
2240 if (INTEGRAL_MODE_P (mode)
2241 && (plus_minus_operand_p (op0)
2242 || plus_minus_operand_p (op1))
2243 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2244 return tem;
2246 /* Reassociate floating point addition only when the user
2247 specifies associative math operations. */
2248 if (FLOAT_MODE_P (mode)
2249 && flag_associative_math)
2251 tem = simplify_associative_operation (code, mode, op0, op1);
2252 if (tem)
2253 return tem;
2255 break;
2257 case COMPARE:
2258 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2259 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2260 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2261 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2263 rtx xop00 = XEXP (op0, 0);
2264 rtx xop10 = XEXP (op1, 0);
2266 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2267 return xop00;
2269 if (REG_P (xop00) && REG_P (xop10)
2270 && GET_MODE (xop00) == GET_MODE (xop10)
2271 && REGNO (xop00) == REGNO (xop10)
2272 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2273 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2274 return xop00;
2276 break;
2278 case MINUS:
2279 /* We can't assume x-x is 0 even with non-IEEE floating point,
2280 but since it is zero except in very strange circumstances, we
2281 will treat it as zero with -ffinite-math-only. */
2282 if (rtx_equal_p (trueop0, trueop1)
2283 && ! side_effects_p (op0)
2284 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2285 return CONST0_RTX (mode);
2287 /* Change subtraction from zero into negation. (0 - x) is the
2288 same as -x when x is NaN, infinite, or finite and nonzero.
2289 But if the mode has signed zeros, and does not round towards
2290 -infinity, then 0 - 0 is 0, not -0. */
2291 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2292 return simplify_gen_unary (NEG, mode, op1, mode);
2294 /* (-1 - a) is ~a, unless the expression contains symbolic
2295 constants, in which case not retaining additions and
2296 subtractions could cause invalid assembly to be produced. */
2297 if (trueop0 == constm1_rtx
2298 && !contains_symbolic_reference_p (op1))
2299 return simplify_gen_unary (NOT, mode, op1, mode);
2301 /* Subtracting 0 has no effect unless the mode has signed zeros
2302 and supports rounding towards -infinity. In such a case,
2303 0 - 0 is -0. */
2304 if (!(HONOR_SIGNED_ZEROS (mode)
2305 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2306 && trueop1 == CONST0_RTX (mode))
2307 return op0;
2309 /* See if this is something like X * C - X or vice versa or
2310 if the multiplication is written as a shift. If so, we can
2311 distribute and make a new multiply, shift, or maybe just
2312 have X (if C is 2 in the example above). But don't make
2313 something more expensive than we had before. */
2315 if (SCALAR_INT_MODE_P (mode))
2317 rtx lhs = op0, rhs = op1;
2319 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2320 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2322 if (GET_CODE (lhs) == NEG)
2324 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2325 lhs = XEXP (lhs, 0);
2327 else if (GET_CODE (lhs) == MULT
2328 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2330 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2331 lhs = XEXP (lhs, 0);
2333 else if (GET_CODE (lhs) == ASHIFT
2334 && CONST_INT_P (XEXP (lhs, 1))
2335 && INTVAL (XEXP (lhs, 1)) >= 0
2336 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2338 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2339 GET_MODE_PRECISION (mode));
2340 lhs = XEXP (lhs, 0);
2343 if (GET_CODE (rhs) == NEG)
2345 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2346 rhs = XEXP (rhs, 0);
2348 else if (GET_CODE (rhs) == MULT
2349 && CONST_INT_P (XEXP (rhs, 1)))
2351 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2352 rhs = XEXP (rhs, 0);
2354 else if (GET_CODE (rhs) == ASHIFT
2355 && CONST_INT_P (XEXP (rhs, 1))
2356 && INTVAL (XEXP (rhs, 1)) >= 0
2357 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2359 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2360 GET_MODE_PRECISION (mode));
2361 negcoeff1 = -negcoeff1;
2362 rhs = XEXP (rhs, 0);
2365 if (rtx_equal_p (lhs, rhs))
2367 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2368 rtx coeff;
2369 bool speed = optimize_function_for_speed_p (cfun);
2371 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2373 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2374 return (set_src_cost (tem, mode, speed)
2375 <= set_src_cost (orig, mode, speed) ? tem : 0);
2379 /* (a - (-b)) -> (a + b). True even for IEEE. */
2380 if (GET_CODE (op1) == NEG)
2381 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2383 /* (-x - c) may be simplified as (-c - x). */
2384 if (GET_CODE (op0) == NEG
2385 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2387 tem = simplify_unary_operation (NEG, mode, op1, mode);
2388 if (tem)
2389 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2392 /* Don't let a relocatable value get a negative coeff. */
2393 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2394 return simplify_gen_binary (PLUS, mode,
2395 op0,
2396 neg_const_int (mode, op1));
2398 /* (x - (x & y)) -> (x & ~y) */
2399 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2401 if (rtx_equal_p (op0, XEXP (op1, 0)))
2403 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2404 GET_MODE (XEXP (op1, 1)));
2405 return simplify_gen_binary (AND, mode, op0, tem);
2407 if (rtx_equal_p (op0, XEXP (op1, 1)))
2409 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2410 GET_MODE (XEXP (op1, 0)));
2411 return simplify_gen_binary (AND, mode, op0, tem);
2415 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2416 by reversing the comparison code if valid. */
2417 if (STORE_FLAG_VALUE == 1
2418 && trueop0 == const1_rtx
2419 && COMPARISON_P (op1)
2420 && (reversed = reversed_comparison (op1, mode)))
2421 return reversed;
2423 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2424 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2425 && GET_CODE (op1) == MULT
2426 && GET_CODE (XEXP (op1, 0)) == NEG)
2428 rtx in1, in2;
2430 in1 = XEXP (XEXP (op1, 0), 0);
2431 in2 = XEXP (op1, 1);
2432 return simplify_gen_binary (PLUS, mode,
2433 simplify_gen_binary (MULT, mode,
2434 in1, in2),
2435 op0);
2438 /* Canonicalize (minus (neg A) (mult B C)) to
2439 (minus (mult (neg B) C) A). */
2440 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2441 && GET_CODE (op1) == MULT
2442 && GET_CODE (op0) == NEG)
2444 rtx in1, in2;
2446 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2447 in2 = XEXP (op1, 1);
2448 return simplify_gen_binary (MINUS, mode,
2449 simplify_gen_binary (MULT, mode,
2450 in1, in2),
2451 XEXP (op0, 0));
2454 /* If one of the operands is a PLUS or a MINUS, see if we can
2455 simplify this by the associative law. This will, for example,
2456 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2457 Don't use the associative law for floating point.
2458 The inaccuracy makes it nonassociative,
2459 and subtle programs can break if operations are associated. */
2461 if (INTEGRAL_MODE_P (mode)
2462 && (plus_minus_operand_p (op0)
2463 || plus_minus_operand_p (op1))
2464 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2465 return tem;
2466 break;
2468 case MULT:
2469 if (trueop1 == constm1_rtx)
2470 return simplify_gen_unary (NEG, mode, op0, mode);
2472 if (GET_CODE (op0) == NEG)
2474 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2475 /* If op1 is a MULT as well and simplify_unary_operation
2476 just moved the NEG to the second operand, simplify_gen_binary
2477 below could, through simplify_associative_operation, move
2478 the NEG around again and recurse endlessly. */
2479 if (temp
2480 && GET_CODE (op1) == MULT
2481 && GET_CODE (temp) == MULT
2482 && XEXP (op1, 0) == XEXP (temp, 0)
2483 && GET_CODE (XEXP (temp, 1)) == NEG
2484 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2485 temp = NULL_RTX;
2486 if (temp)
2487 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2489 if (GET_CODE (op1) == NEG)
2491 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2492 /* If op0 is a MULT as well and simplify_unary_operation
2493 just moved the NEG to the second operand, simplify_gen_binary
2494 below could, through simplify_associative_operation, move
2495 the NEG around again and recurse endlessly. */
2496 if (temp
2497 && GET_CODE (op0) == MULT
2498 && GET_CODE (temp) == MULT
2499 && XEXP (op0, 0) == XEXP (temp, 0)
2500 && GET_CODE (XEXP (temp, 1)) == NEG
2501 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2502 temp = NULL_RTX;
2503 if (temp)
2504 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2507 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2508 x is NaN, since x * 0 is then also NaN. Nor is it valid
2509 when the mode has signed zeros, since multiplying a negative
2510 number by 0 will give -0, not 0. */
2511 if (!HONOR_NANS (mode)
2512 && !HONOR_SIGNED_ZEROS (mode)
2513 && trueop1 == CONST0_RTX (mode)
2514 && ! side_effects_p (op0))
2515 return op1;
2517 /* In IEEE floating point, x*1 is not equivalent to x for
2518 signaling NaNs. */
2519 if (!HONOR_SNANS (mode)
2520 && trueop1 == CONST1_RTX (mode))
2521 return op0;
2523 /* Convert multiply by constant power of two into shift. */
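/* For example, x * 8 becomes x << 3.  */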
2524 if (CONST_SCALAR_INT_P (trueop1))
2526 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2527 if (val >= 0)
2528 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2531 /* x*2 is x+x and x*(-1) is -x */
2532 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2533 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2534 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2535 && GET_MODE (op0) == mode)
2537 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2539 if (real_equal (d1, &dconst2))
2540 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2542 if (!HONOR_SNANS (mode)
2543 && real_equal (d1, &dconstm1))
2544 return simplify_gen_unary (NEG, mode, op0, mode);
2547 /* Optimize -x * -x as x * x. */
2548 if (FLOAT_MODE_P (mode)
2549 && GET_CODE (op0) == NEG
2550 && GET_CODE (op1) == NEG
2551 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2552 && !side_effects_p (XEXP (op0, 0)))
2553 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2555 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2556 if (SCALAR_FLOAT_MODE_P (mode)
2557 && GET_CODE (op0) == ABS
2558 && GET_CODE (op1) == ABS
2559 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2560 && !side_effects_p (XEXP (op0, 0)))
2561 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2563 /* Reassociate multiplication, but for floating point MULTs
2564 only when the user specifies unsafe math optimizations. */
2565 if (! FLOAT_MODE_P (mode)
2566 || flag_unsafe_math_optimizations)
2568 tem = simplify_associative_operation (code, mode, op0, op1);
2569 if (tem)
2570 return tem;
2572 break;
2574 case IOR:
2575 if (trueop1 == CONST0_RTX (mode))
2576 return op0;
2577 if (INTEGRAL_MODE_P (mode)
2578 && trueop1 == CONSTM1_RTX (mode)
2579 && !side_effects_p (op0))
2580 return op1;
2581 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2582 return op0;
2583 /* A | (~A) -> -1 */
2584 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2585 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2586 && ! side_effects_p (op0)
2587 && SCALAR_INT_MODE_P (mode))
2588 return constm1_rtx;
2590 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2591 if (CONST_INT_P (op1)
2592 && HWI_COMPUTABLE_MODE_P (mode)
2593 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2594 && !side_effects_p (op0))
2595 return op1;
2597 /* Canonicalize (X & C1) | C2. */
2598 if (GET_CODE (op0) == AND
2599 && CONST_INT_P (trueop1)
2600 && CONST_INT_P (XEXP (op0, 1)))
2602 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2603 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2604 HOST_WIDE_INT c2 = INTVAL (trueop1);
2606 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2607 if ((c1 & c2) == c1
2608 && !side_effects_p (XEXP (op0, 0)))
2609 return trueop1;
2611 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2612 if (((c1|c2) & mask) == mask)
2613 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2615 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2616 if (((c1 & ~c2) & mask) != (c1 & mask))
2618 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2619 gen_int_mode (c1 & ~c2, mode));
2620 return simplify_gen_binary (IOR, mode, tem, op1);
2624 /* Convert (A & B) | A to A. */
2625 if (GET_CODE (op0) == AND
2626 && (rtx_equal_p (XEXP (op0, 0), op1)
2627 || rtx_equal_p (XEXP (op0, 1), op1))
2628 && ! side_effects_p (XEXP (op0, 0))
2629 && ! side_effects_p (XEXP (op0, 1)))
2630 return op1;
2632 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2633 mode size to (rotate A CX). */
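/* For example, in SImode (ior (ashift x 8) (lshiftrt x 24)) becomes
   (rotate x 8).  */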
2635 if (GET_CODE (op1) == ASHIFT
2636 || GET_CODE (op1) == SUBREG)
2638 opleft = op1;
2639 opright = op0;
2641 else
2643 opright = op1;
2644 opleft = op0;
2647 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2648 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2649 && CONST_INT_P (XEXP (opleft, 1))
2650 && CONST_INT_P (XEXP (opright, 1))
2651 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2652 == GET_MODE_PRECISION (mode)))
2653 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2655 /* Same, but for ashift that has been "simplified" to a wider mode
2656 by simplify_shift_const. */
2658 if (GET_CODE (opleft) == SUBREG
2659 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2660 && GET_CODE (opright) == LSHIFTRT
2661 && GET_CODE (XEXP (opright, 0)) == SUBREG
2662 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2663 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2664 && (GET_MODE_SIZE (GET_MODE (opleft))
2665 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2666 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2667 SUBREG_REG (XEXP (opright, 0)))
2668 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2669 && CONST_INT_P (XEXP (opright, 1))
2670 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2671 == GET_MODE_PRECISION (mode)))
2672 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2673 XEXP (SUBREG_REG (opleft), 1));
2675 /* If we have (ior (and X C1) C2), simplify this by making
2676 C1 as small as possible if C1 actually changes. */
2677 if (CONST_INT_P (op1)
2678 && (HWI_COMPUTABLE_MODE_P (mode)
2679 || INTVAL (op1) > 0)
2680 && GET_CODE (op0) == AND
2681 && CONST_INT_P (XEXP (op0, 1))
2682 && CONST_INT_P (op1)
2683 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2685 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2686 gen_int_mode (UINTVAL (XEXP (op0, 1))
2687 & ~UINTVAL (op1),
2688 mode));
2689 return simplify_gen_binary (IOR, mode, tmp, op1);
2692 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2693 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2694 the PLUS does not affect any of the bits in OP1: then we can do
2695 the IOR as a PLUS and we can associate. This is valid if OP1
2696 can be safely shifted left C bits. */
2697 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2698 && GET_CODE (XEXP (op0, 0)) == PLUS
2699 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2700 && CONST_INT_P (XEXP (op0, 1))
2701 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2703 int count = INTVAL (XEXP (op0, 1));
2704 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2706 if (mask >> count == INTVAL (trueop1)
2707 && trunc_int_for_mode (mask, mode) == mask
2708 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2709 return simplify_gen_binary (ASHIFTRT, mode,
2710 plus_constant (mode, XEXP (op0, 0),
2711 mask),
2712 XEXP (op0, 1));
2715 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2716 if (tem)
2717 return tem;
2719 tem = simplify_associative_operation (code, mode, op0, op1);
2720 if (tem)
2721 return tem;
2722 break;
2724 case XOR:
2725 if (trueop1 == CONST0_RTX (mode))
2726 return op0;
2727 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2728 return simplify_gen_unary (NOT, mode, op0, mode);
2729 if (rtx_equal_p (trueop0, trueop1)
2730 && ! side_effects_p (op0)
2731 && GET_MODE_CLASS (mode) != MODE_CC)
2732 return CONST0_RTX (mode);
2734 /* Canonicalize XOR of the most significant bit to PLUS. */
2735 if (CONST_SCALAR_INT_P (op1)
2736 && mode_signbit_p (mode, op1))
2737 return simplify_gen_binary (PLUS, mode, op0, op1);
2738 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2739 if (CONST_SCALAR_INT_P (op1)
2740 && GET_CODE (op0) == PLUS
2741 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2742 && mode_signbit_p (mode, XEXP (op0, 1)))
2743 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2744 simplify_gen_binary (XOR, mode, op1,
2745 XEXP (op0, 1)));
2747 /* If we are XORing two things that have no bits in common,
2748 convert them into an IOR. This helps to detect rotation encoded
2749 using those methods and possibly other simplifications. */
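/* For example, in SImode (xor (ashift x 8) (lshiftrt x 24)) has
   disjoint nonzero bits, so it becomes the corresponding IOR, which
   can then be recognized as (rotate x 8).  */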
2751 if (HWI_COMPUTABLE_MODE_P (mode)
2752 && (nonzero_bits (op0, mode)
2753 & nonzero_bits (op1, mode)) == 0)
2754 return (simplify_gen_binary (IOR, mode, op0, op1));
2756 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2757 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2758 (NOT y). */
2760 int num_negated = 0;
2762 if (GET_CODE (op0) == NOT)
2763 num_negated++, op0 = XEXP (op0, 0);
2764 if (GET_CODE (op1) == NOT)
2765 num_negated++, op1 = XEXP (op1, 0);
2767 if (num_negated == 2)
2768 return simplify_gen_binary (XOR, mode, op0, op1);
2769 else if (num_negated == 1)
2770 return simplify_gen_unary (NOT, mode,
2771 simplify_gen_binary (XOR, mode, op0, op1),
2772 mode);
2775 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2776 correspond to a machine insn or result in further simplifications
2777 if B is a constant. */
2779 if (GET_CODE (op0) == AND
2780 && rtx_equal_p (XEXP (op0, 1), op1)
2781 && ! side_effects_p (op1))
2782 return simplify_gen_binary (AND, mode,
2783 simplify_gen_unary (NOT, mode,
2784 XEXP (op0, 0), mode),
2785 op1);
2787 else if (GET_CODE (op0) == AND
2788 && rtx_equal_p (XEXP (op0, 0), op1)
2789 && ! side_effects_p (op1))
2790 return simplify_gen_binary (AND, mode,
2791 simplify_gen_unary (NOT, mode,
2792 XEXP (op0, 1), mode),
2793 op1);
2795 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2796 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2797 out bits inverted twice and not set by C. Similarly, given
2798 (xor (and (xor A B) C) D), simplify without inverting C in
2799 the xor operand: (xor (and A C) (B&C)^D). */
2801 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2802 && GET_CODE (XEXP (op0, 0)) == XOR
2803 && CONST_INT_P (op1)
2804 && CONST_INT_P (XEXP (op0, 1))
2805 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2807 enum rtx_code op = GET_CODE (op0);
2808 rtx a = XEXP (XEXP (op0, 0), 0);
2809 rtx b = XEXP (XEXP (op0, 0), 1);
2810 rtx c = XEXP (op0, 1);
2811 rtx d = op1;
2812 HOST_WIDE_INT bval = INTVAL (b);
2813 HOST_WIDE_INT cval = INTVAL (c);
2814 HOST_WIDE_INT dval = INTVAL (d);
2815 HOST_WIDE_INT xcval;
2817 if (op == IOR)
2818 xcval = ~cval;
2819 else
2820 xcval = cval;
2822 return simplify_gen_binary (XOR, mode,
2823 simplify_gen_binary (op, mode, a, c),
2824 gen_int_mode ((bval & xcval) ^ dval,
2825 mode));
2828 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2829 we can transform like this:
2830 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2831 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2832 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2833 Attempt a few simplifications when B and C are both constants. */
2834 if (GET_CODE (op0) == AND
2835 && CONST_INT_P (op1)
2836 && CONST_INT_P (XEXP (op0, 1)))
2838 rtx a = XEXP (op0, 0);
2839 rtx b = XEXP (op0, 1);
2840 rtx c = op1;
2841 HOST_WIDE_INT bval = INTVAL (b);
2842 HOST_WIDE_INT cval = INTVAL (c);
2844 /* Instead of computing ~A&C directly, we compute its negated
2845 value, A|~C (note that ~A&C == ~(A|~C)). If it yields -1,
2846 ~A&C is zero, so we can optimize for sure. If it does not
2847 simplify, we still try to compute ~A&C below, but since that
2848 always allocates RTL, we don't try that before committing to
2849 returning a simplified expression. */
2850 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2851 GEN_INT (~cval));
2853 if ((~cval & bval) == 0)
2855 rtx na_c = NULL_RTX;
2856 if (n_na_c)
2857 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2858 else
2860 /* If ~A does not simplify, don't bother: we don't
2861 want to simplify 2 operations into 3, and if na_c
2862 were to simplify with na, n_na_c would have
2863 simplified as well. */
2864 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2865 if (na)
2866 na_c = simplify_gen_binary (AND, mode, na, c);
2869 /* Try to simplify ~A&C | ~B&C. */
2870 if (na_c != NULL_RTX)
2871 return simplify_gen_binary (IOR, mode, na_c,
2872 gen_int_mode (~bval & cval, mode));
2874 else
2876 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2877 if (n_na_c == CONSTM1_RTX (mode))
2879 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2880 gen_int_mode (~cval & bval,
2881 mode));
2882 return simplify_gen_binary (IOR, mode, a_nc_b,
2883 gen_int_mode (~bval & cval,
2884 mode));
2889 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2890 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2891 machines, and also has shorter instruction path length. */
2892 if (GET_CODE (op0) == AND
2893 && GET_CODE (XEXP (op0, 0)) == XOR
2894 && CONST_INT_P (XEXP (op0, 1))
2895 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2897 rtx a = trueop1;
2898 rtx b = XEXP (XEXP (op0, 0), 1);
2899 rtx c = XEXP (op0, 1);
2900 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2901 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2902 rtx bc = simplify_gen_binary (AND, mode, b, c);
2903 return simplify_gen_binary (IOR, mode, a_nc, bc);
2905 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2906 else if (GET_CODE (op0) == AND
2907 && GET_CODE (XEXP (op0, 0)) == XOR
2908 && CONST_INT_P (XEXP (op0, 1))
2909 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2911 rtx a = XEXP (XEXP (op0, 0), 0);
2912 rtx b = trueop1;
2913 rtx c = XEXP (op0, 1);
2914 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2915 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2916 rtx ac = simplify_gen_binary (AND, mode, a, c);
2917 return simplify_gen_binary (IOR, mode, ac, b_nc);
2920 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2921 comparison if STORE_FLAG_VALUE is 1. */
2922 if (STORE_FLAG_VALUE == 1
2923 && trueop1 == const1_rtx
2924 && COMPARISON_P (op0)
2925 && (reversed = reversed_comparison (op0, mode)))
2926 return reversed;
2928 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2929 is (lt foo (const_int 0)), so we can perform the above
2930 simplification if STORE_FLAG_VALUE is 1. */
2932 if (STORE_FLAG_VALUE == 1
2933 && trueop1 == const1_rtx
2934 && GET_CODE (op0) == LSHIFTRT
2935 && CONST_INT_P (XEXP (op0, 1))
2936 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2937 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2939 /* (xor (comparison foo bar) (const_int sign-bit))
2940 when STORE_FLAG_VALUE is the sign bit. */
2941 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2942 && trueop1 == const_true_rtx
2943 && COMPARISON_P (op0)
2944 && (reversed = reversed_comparison (op0, mode)))
2945 return reversed;
2947 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2948 if (tem)
2949 return tem;
2951 tem = simplify_associative_operation (code, mode, op0, op1);
2952 if (tem)
2953 return tem;
2954 break;
2956 case AND:
2957 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2958 return trueop1;
2959 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2960 return op0;
2961 if (HWI_COMPUTABLE_MODE_P (mode))
2963 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2964 HOST_WIDE_INT nzop1;
2965 if (CONST_INT_P (trueop1))
2967 HOST_WIDE_INT val1 = INTVAL (trueop1);
2968 /* If we are turning off bits already known off in OP0, we need
2969 not do an AND. */
2970 if ((nzop0 & ~val1) == 0)
2971 return op0;
2973 nzop1 = nonzero_bits (trueop1, mode);
2974 /* If we are clearing all the nonzero bits, the result is zero. */
2975 if ((nzop1 & nzop0) == 0
2976 && !side_effects_p (op0) && !side_effects_p (op1))
2977 return CONST0_RTX (mode);
2979 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2980 && GET_MODE_CLASS (mode) != MODE_CC)
2981 return op0;
2982 /* A & (~A) -> 0 */
2983 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2984 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2985 && ! side_effects_p (op0)
2986 && GET_MODE_CLASS (mode) != MODE_CC)
2987 return CONST0_RTX (mode);
2989 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2990 there are no nonzero bits of C outside of X's mode. */
2991 if ((GET_CODE (op0) == SIGN_EXTEND
2992 || GET_CODE (op0) == ZERO_EXTEND)
2993 && CONST_INT_P (trueop1)
2994 && HWI_COMPUTABLE_MODE_P (mode)
2995 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2996 & UINTVAL (trueop1)) == 0)
2998 machine_mode imode = GET_MODE (XEXP (op0, 0));
2999 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3000 gen_int_mode (INTVAL (trueop1),
3001 imode));
3002 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3005 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3006 we might be able to further simplify the AND with X and potentially
3007 remove the truncation altogether. */
3008 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3010 rtx x = XEXP (op0, 0);
3011 machine_mode xmode = GET_MODE (x);
3012 tem = simplify_gen_binary (AND, xmode, x,
3013 gen_int_mode (INTVAL (trueop1), xmode));
3014 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3017 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3018 if (GET_CODE (op0) == IOR
3019 && CONST_INT_P (trueop1)
3020 && CONST_INT_P (XEXP (op0, 1)))
3022 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3023 return simplify_gen_binary (IOR, mode,
3024 simplify_gen_binary (AND, mode,
3025 XEXP (op0, 0), op1),
3026 gen_int_mode (tmp, mode));
3029 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3030 insn (and may simplify more). */
3031 if (GET_CODE (op0) == XOR
3032 && rtx_equal_p (XEXP (op0, 0), op1)
3033 && ! side_effects_p (op1))
3034 return simplify_gen_binary (AND, mode,
3035 simplify_gen_unary (NOT, mode,
3036 XEXP (op0, 1), mode),
3037 op1);
3039 if (GET_CODE (op0) == XOR
3040 && rtx_equal_p (XEXP (op0, 1), op1)
3041 && ! side_effects_p (op1))
3042 return simplify_gen_binary (AND, mode,
3043 simplify_gen_unary (NOT, mode,
3044 XEXP (op0, 0), mode),
3045 op1);
3047 /* Similarly for (~(A ^ B)) & A. */
3048 if (GET_CODE (op0) == NOT
3049 && GET_CODE (XEXP (op0, 0)) == XOR
3050 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3051 && ! side_effects_p (op1))
3052 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3054 if (GET_CODE (op0) == NOT
3055 && GET_CODE (XEXP (op0, 0)) == XOR
3056 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3057 && ! side_effects_p (op1))
3058 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3060 /* Convert (A | B) & A to A. */
3061 if (GET_CODE (op0) == IOR
3062 && (rtx_equal_p (XEXP (op0, 0), op1)
3063 || rtx_equal_p (XEXP (op0, 1), op1))
3064 && ! side_effects_p (XEXP (op0, 0))
3065 && ! side_effects_p (XEXP (op0, 1)))
3066 return op1;
3068 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3069 ((A & N) + B) & M -> (A + B) & M
3070 Similarly if (N & M) == 0,
3071 ((A | N) + B) & M -> (A + B) & M
3072 and for - instead of + and/or ^ instead of |.
3073 Also, if (N & M) == 0, then
3074 (A +- N) & M -> A & M. */
3075 if (CONST_INT_P (trueop1)
3076 && HWI_COMPUTABLE_MODE_P (mode)
3077 && ~UINTVAL (trueop1)
3078 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3079 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3081 rtx pmop[2];
3082 int which;
3084 pmop[0] = XEXP (op0, 0);
3085 pmop[1] = XEXP (op0, 1);
3087 if (CONST_INT_P (pmop[1])
3088 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3089 return simplify_gen_binary (AND, mode, pmop[0], op1);
3091 for (which = 0; which < 2; which++)
3093 tem = pmop[which];
3094 switch (GET_CODE (tem))
3096 case AND:
3097 if (CONST_INT_P (XEXP (tem, 1))
3098 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3099 == UINTVAL (trueop1))
3100 pmop[which] = XEXP (tem, 0);
3101 break;
3102 case IOR:
3103 case XOR:
3104 if (CONST_INT_P (XEXP (tem, 1))
3105 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3106 pmop[which] = XEXP (tem, 0);
3107 break;
3108 default:
3109 break;
3113 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3115 tem = simplify_gen_binary (GET_CODE (op0), mode,
3116 pmop[0], pmop[1]);
3117 return simplify_gen_binary (code, mode, tem, op1);
3121 /* (and X (ior (not X) Y)) -> (and X Y) */
3122 if (GET_CODE (op1) == IOR
3123 && GET_CODE (XEXP (op1, 0)) == NOT
3124 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3125 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3127 /* (and (ior (not X) Y) X) -> (and X Y) */
3128 if (GET_CODE (op0) == IOR
3129 && GET_CODE (XEXP (op0, 0)) == NOT
3130 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3131 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3133 /* (and X (ior Y (not X))) -> (and X Y) */
3134 if (GET_CODE (op1) == IOR
3135 && GET_CODE (XEXP (op1, 1)) == NOT
3136 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3137 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3139 /* (and (ior Y (not X)) X) -> (and X Y) */
3140 if (GET_CODE (op0) == IOR
3141 && GET_CODE (XEXP (op0, 1)) == NOT
3142 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3143 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3145 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3146 if (tem)
3147 return tem;
3149 tem = simplify_associative_operation (code, mode, op0, op1);
3150 if (tem)
3151 return tem;
3152 break;
3154 case UDIV:
3155 /* 0/x is 0 (or x&0 if x has side-effects). */
3156 if (trueop0 == CONST0_RTX (mode))
3158 if (side_effects_p (op1))
3159 return simplify_gen_binary (AND, mode, op1, trueop0);
3160 return trueop0;
3162 /* x/1 is x. */
3163 if (trueop1 == CONST1_RTX (mode))
3165 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3166 if (tem)
3167 return tem;
3169 /* Convert divide by power of two into shift. */
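/* For example, x / 8 becomes x >> 3 (a logical shift, since the
   division is unsigned).  */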
3170 if (CONST_INT_P (trueop1)
3171 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3172 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3173 break;
3175 case DIV:
3176 /* Handle floating point and integers separately. */
3177 if (SCALAR_FLOAT_MODE_P (mode))
3179 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3180 safe for modes with NaNs, since 0.0 / 0.0 will then be
3181 NaN rather than 0.0. Nor is it safe for modes with signed
3182 zeros, since dividing 0 by a negative number gives -0.0 */
3183 if (trueop0 == CONST0_RTX (mode)
3184 && !HONOR_NANS (mode)
3185 && !HONOR_SIGNED_ZEROS (mode)
3186 && ! side_effects_p (op1))
3187 return op0;
3188 /* x/1.0 is x. */
3189 if (trueop1 == CONST1_RTX (mode)
3190 && !HONOR_SNANS (mode))
3191 return op0;
3193 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3194 && trueop1 != CONST0_RTX (mode))
3196 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3198 /* x/-1.0 is -x. */
3199 if (real_equal (d1, &dconstm1)
3200 && !HONOR_SNANS (mode))
3201 return simplify_gen_unary (NEG, mode, op0, mode);
3203 /* Change FP division by a constant into multiplication.
3204 Only do this with -freciprocal-math. */
3205 if (flag_reciprocal_math
3206 && !real_equal (d1, &dconst0))
3208 REAL_VALUE_TYPE d;
3209 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3210 tem = const_double_from_real_value (d, mode);
3211 return simplify_gen_binary (MULT, mode, op0, tem);
3215 else if (SCALAR_INT_MODE_P (mode))
3217 /* 0/x is 0 (or x&0 if x has side-effects). */
3218 if (trueop0 == CONST0_RTX (mode)
3219 && !cfun->can_throw_non_call_exceptions)
3221 if (side_effects_p (op1))
3222 return simplify_gen_binary (AND, mode, op1, trueop0);
3223 return trueop0;
3225 /* x/1 is x. */
3226 if (trueop1 == CONST1_RTX (mode))
3228 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3229 if (tem)
3230 return tem;
3232 /* x/-1 is -x. */
3233 if (trueop1 == constm1_rtx)
3235 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3236 if (x)
3237 return simplify_gen_unary (NEG, mode, x, mode);
3240 break;
3242 case UMOD:
3243 /* 0%x is 0 (or x&0 if x has side-effects). */
3244 if (trueop0 == CONST0_RTX (mode))
3246 if (side_effects_p (op1))
3247 return simplify_gen_binary (AND, mode, op1, trueop0);
3248 return trueop0;
3250 /* x%1 is 0 (or x&0 if x has side-effects). */
3251 if (trueop1 == CONST1_RTX (mode))
3253 if (side_effects_p (op0))
3254 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3255 return CONST0_RTX (mode);
3257 /* Implement modulus by power of two as AND. */
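/* For example, x % 16 becomes x & 15 for this unsigned modulus.  */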
3258 if (CONST_INT_P (trueop1)
3259 && exact_log2 (UINTVAL (trueop1)) > 0)
3260 return simplify_gen_binary (AND, mode, op0,
3261 gen_int_mode (INTVAL (op1) - 1, mode));
3262 break;
3264 case MOD:
3265 /* 0%x is 0 (or x&0 if x has side-effects). */
3266 if (trueop0 == CONST0_RTX (mode))
3268 if (side_effects_p (op1))
3269 return simplify_gen_binary (AND, mode, op1, trueop0);
3270 return trueop0;
3272 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3273 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3275 if (side_effects_p (op0))
3276 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3277 return CONST0_RTX (mode);
3279 break;
3281 case ROTATERT:
3282 case ROTATE:
3283 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3284 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3285 bitsize - 1, use the other rotate direction with an amount of
3286 1 .. bitsize / 2 - 1 instead. */
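/* For example, on a target with both rotate patterns, SImode
   (rotate x 24) is canonicalized to (rotatert x 8), while
   (rotate x 16) is left as a left rotate.  */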
3287 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3288 if (CONST_INT_P (trueop1)
3289 && IN_RANGE (INTVAL (trueop1),
3290 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3291 GET_MODE_PRECISION (mode) - 1))
3292 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3293 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3294 - INTVAL (trueop1)));
3295 #endif
3296 /* FALLTHRU */
3297 case ASHIFTRT:
3298 if (trueop1 == CONST0_RTX (mode))
3299 return op0;
3300 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3301 return op0;
3302 /* Rotating ~0 always results in ~0. */
3303 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3304 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3305 && ! side_effects_p (op1))
3306 return op0;
3307 /* Given:
3308 scalar modes M1, M2
3309 scalar constants c1, c2
3310 size (M2) > size (M1)
3311 c1 == size (M2) - size (M1)
3312 optimize:
3313 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3314 <low_part>)
3315 (const_int <c2>))
3317 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3318 <low_part>). */
3319 if (code == ASHIFTRT
3320 && !VECTOR_MODE_P (mode)
3321 && SUBREG_P (op0)
3322 && CONST_INT_P (op1)
3323 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3324 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3325 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3326 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3327 > GET_MODE_BITSIZE (mode))
3328 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3329 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3330 - GET_MODE_BITSIZE (mode)))
3331 && subreg_lowpart_p (op0))
3333 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3334 + INTVAL (op1));
3335 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3336 tmp = simplify_gen_binary (ASHIFTRT,
3337 GET_MODE (SUBREG_REG (op0)),
3338 XEXP (SUBREG_REG (op0), 0),
3339 tmp);
3340 return lowpart_subreg (mode, tmp, inner_mode);
3342 canonicalize_shift:
3343 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3345 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3346 if (val != INTVAL (op1))
3347 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3349 break;
3351 case ASHIFT:
3352 case SS_ASHIFT:
3353 case US_ASHIFT:
3354 if (trueop1 == CONST0_RTX (mode))
3355 return op0;
3356 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3357 return op0;
3358 goto canonicalize_shift;
3360 case LSHIFTRT:
3361 if (trueop1 == CONST0_RTX (mode))
3362 return op0;
3363 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3364 return op0;
3365 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
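/* For example, if CLZ of zero is defined as the 32-bit precision,
   (lshiftrt (clz x) 5) is nonzero exactly when clz (x) == 32,
   i.e. when x == 0, so it folds to (eq x 0).  */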
3366 if (GET_CODE (op0) == CLZ
3367 && CONST_INT_P (trueop1)
3368 && STORE_FLAG_VALUE == 1
3369 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3371 machine_mode imode = GET_MODE (XEXP (op0, 0));
3372 unsigned HOST_WIDE_INT zero_val = 0;
3374 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3375 && zero_val == GET_MODE_PRECISION (imode)
3376 && INTVAL (trueop1) == exact_log2 (zero_val))
3377 return simplify_gen_relational (EQ, mode, imode,
3378 XEXP (op0, 0), const0_rtx);
3380 goto canonicalize_shift;
3382 case SMIN:
3383 if (width <= HOST_BITS_PER_WIDE_INT
3384 && mode_signbit_p (mode, trueop1)
3385 && ! side_effects_p (op0))
3386 return op1;
3387 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3388 return op0;
3389 tem = simplify_associative_operation (code, mode, op0, op1);
3390 if (tem)
3391 return tem;
3392 break;
3394 case SMAX:
3395 if (width <= HOST_BITS_PER_WIDE_INT
3396 && CONST_INT_P (trueop1)
3397 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3398 && ! side_effects_p (op0))
3399 return op1;
3400 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3401 return op0;
3402 tem = simplify_associative_operation (code, mode, op0, op1);
3403 if (tem)
3404 return tem;
3405 break;
3407 case UMIN:
3408 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3409 return op1;
3410 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3411 return op0;
3412 tem = simplify_associative_operation (code, mode, op0, op1);
3413 if (tem)
3414 return tem;
3415 break;
3417 case UMAX:
3418 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3419 return op1;
3420 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3421 return op0;
3422 tem = simplify_associative_operation (code, mode, op0, op1);
3423 if (tem)
3424 return tem;
3425 break;
3427 case SS_PLUS:
3428 case US_PLUS:
3429 case SS_MINUS:
3430 case US_MINUS:
3431 case SS_MULT:
3432 case US_MULT:
3433 case SS_DIV:
3434 case US_DIV:
3435 /* ??? There are simplifications that can be done. */
3436 return 0;
3438 case VEC_SELECT:
3439 if (!VECTOR_MODE_P (mode))
3441 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3442 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3443 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3444 gcc_assert (XVECLEN (trueop1, 0) == 1);
3445 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3447 if (GET_CODE (trueop0) == CONST_VECTOR)
3448 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3449 (trueop1, 0, 0)));
3451 /* Extract a scalar element from a nested VEC_SELECT expression
3452 (with an optional nested VEC_CONCAT expression). Some targets
3453 (i386) extract a scalar element from a vector using a chain of
3454 nested VEC_SELECT expressions. When the input operand is a memory
3455 operand, this operation can be simplified to a simple scalar
3456 load from an offset memory address. */
3457 if (GET_CODE (trueop0) == VEC_SELECT)
3459 rtx op0 = XEXP (trueop0, 0);
3460 rtx op1 = XEXP (trueop0, 1);
3462 machine_mode opmode = GET_MODE (op0);
3463 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3464 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3466 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3467 int elem;
3469 rtvec vec;
3470 rtx tmp_op, tmp;
3472 gcc_assert (GET_CODE (op1) == PARALLEL);
3473 gcc_assert (i < n_elts);
3475 /* Select the element pointed to by the nested selector. */
3476 elem = INTVAL (XVECEXP (op1, 0, i));
3478 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3479 if (GET_CODE (op0) == VEC_CONCAT)
3481 rtx op00 = XEXP (op0, 0);
3482 rtx op01 = XEXP (op0, 1);
3484 machine_mode mode00, mode01;
3485 int n_elts00, n_elts01;
3487 mode00 = GET_MODE (op00);
3488 mode01 = GET_MODE (op01);
3490 /* Find out number of elements of each operand. */
3491 if (VECTOR_MODE_P (mode00))
3493 elt_size = GET_MODE_UNIT_SIZE (mode00);
3494 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3496 else
3497 n_elts00 = 1;
3499 if (VECTOR_MODE_P (mode01))
3501 elt_size = GET_MODE_UNIT_SIZE (mode01);
3502 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3504 else
3505 n_elts01 = 1;
3507 gcc_assert (n_elts == n_elts00 + n_elts01);
3509 /* Select correct operand of VEC_CONCAT
3510 and adjust selector. */
3511 if (elem < n_elts01)
3512 tmp_op = op00;
3513 else
3515 tmp_op = op01;
3516 elem -= n_elts00;
3519 else
3520 tmp_op = op0;
3522 vec = rtvec_alloc (1);
3523 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3525 tmp = gen_rtx_fmt_ee (code, mode,
3526 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3527 return tmp;
3529 if (GET_CODE (trueop0) == VEC_DUPLICATE
3530 && GET_MODE (XEXP (trueop0, 0)) == mode)
3531 return XEXP (trueop0, 0);
3533 else
3535 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3536 gcc_assert (GET_MODE_INNER (mode)
3537 == GET_MODE_INNER (GET_MODE (trueop0)));
3538 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3540 if (GET_CODE (trueop0) == CONST_VECTOR)
3542 int elt_size = GET_MODE_UNIT_SIZE (mode);
3543 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3544 rtvec v = rtvec_alloc (n_elts);
3545 unsigned int i;
3547 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3548 for (i = 0; i < n_elts; i++)
3550 rtx x = XVECEXP (trueop1, 0, i);
3552 gcc_assert (CONST_INT_P (x));
3553 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3554 INTVAL (x));
3557 return gen_rtx_CONST_VECTOR (mode, v);
3560 /* Recognize the identity. */
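/* For example, a V4SImode vec_select whose selector is the parallel
   [0 1 2 3] returns the operand unchanged.  */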
3561 if (GET_MODE (trueop0) == mode)
3563 bool maybe_ident = true;
3564 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3566 rtx j = XVECEXP (trueop1, 0, i);
3567 if (!CONST_INT_P (j) || INTVAL (j) != i)
3569 maybe_ident = false;
3570 break;
3573 if (maybe_ident)
3574 return trueop0;
3577 /* If we build {a,b} then permute it, build the result directly. */
3578 if (XVECLEN (trueop1, 0) == 2
3579 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3580 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3581 && GET_CODE (trueop0) == VEC_CONCAT
3582 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3583 && GET_MODE (XEXP (trueop0, 0)) == mode
3584 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3585 && GET_MODE (XEXP (trueop0, 1)) == mode)
3587 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3588 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3589 rtx subop0, subop1;
3591 gcc_assert (i0 < 4 && i1 < 4);
3592 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3593 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3595 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3598 if (XVECLEN (trueop1, 0) == 2
3599 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3600 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3601 && GET_CODE (trueop0) == VEC_CONCAT
3602 && GET_MODE (trueop0) == mode)
3604 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3605 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3606 rtx subop0, subop1;
3608 gcc_assert (i0 < 2 && i1 < 2);
3609 subop0 = XEXP (trueop0, i0);
3610 subop1 = XEXP (trueop0, i1);
3612 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3615 /* If we select one half of a vec_concat, return that. */
3616 if (GET_CODE (trueop0) == VEC_CONCAT
3617 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3619 rtx subop0 = XEXP (trueop0, 0);
3620 rtx subop1 = XEXP (trueop0, 1);
3621 machine_mode mode0 = GET_MODE (subop0);
3622 machine_mode mode1 = GET_MODE (subop1);
3623 int li = GET_MODE_UNIT_SIZE (mode0);
3624 int l0 = GET_MODE_SIZE (mode0) / li;
3625 int l1 = GET_MODE_SIZE (mode1) / li;
3626 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3627 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3629 bool success = true;
3630 for (int i = 1; i < l0; ++i)
3632 rtx j = XVECEXP (trueop1, 0, i);
3633 if (!CONST_INT_P (j) || INTVAL (j) != i)
3635 success = false;
3636 break;
3639 if (success)
3640 return subop0;
3642 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3644 bool success = true;
3645 for (int i = 1; i < l1; ++i)
3647 rtx j = XVECEXP (trueop1, 0, i);
3648 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3650 success = false;
3651 break;
3654 if (success)
3655 return subop1;
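/* For example, (vec_select:V2SI (vec_concat:V4SI A:V2SI B:V2SI)
   (parallel [2 3])) selects exactly the second half and simplifies to B,
   provided the concatenation has no side effects.  */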
3660 if (XVECLEN (trueop1, 0) == 1
3661 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3662 && GET_CODE (trueop0) == VEC_CONCAT)
3664 rtx vec = trueop0;
3665 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3667 /* Try to find the element in the VEC_CONCAT. */
3668 while (GET_MODE (vec) != mode
3669 && GET_CODE (vec) == VEC_CONCAT)
3671 HOST_WIDE_INT vec_size;
3673 if (CONST_INT_P (XEXP (vec, 0)))
3675 /* vec_concat of two const_ints doesn't make sense with
3676 respect to modes. */
3677 if (CONST_INT_P (XEXP (vec, 1)))
3678 return 0;
3680 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3681 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3683 else
3684 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3686 if (offset < vec_size)
3687 vec = XEXP (vec, 0);
3688 else
3690 offset -= vec_size;
3691 vec = XEXP (vec, 1);
3693 vec = avoid_constant_pool_reference (vec);
3696 if (GET_MODE (vec) == mode)
3697 return vec;
3700 /* If we select elements in a vec_merge that all come from the same
3701 operand, select from that operand directly. */
3702 if (GET_CODE (op0) == VEC_MERGE)
3704 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3705 if (CONST_INT_P (trueop02))
3707 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3708 bool all_operand0 = true;
3709 bool all_operand1 = true;
3710 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3712 rtx j = XVECEXP (trueop1, 0, i);
3713 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3714 all_operand1 = false;
3715 else
3716 all_operand0 = false;
3718 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3719 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3720 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3721 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
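/* For example, with (vec_merge:V4SI A B (const_int 5)) the elements at
   indices 0 and 2 come from A, since bits 0 and 2 of the mask are set, so
   (vec_select:V2SI (vec_merge:V4SI A B (const_int 5)) (parallel [0 2]))
   becomes (vec_select:V2SI A (parallel [0 2])), provided B has no side
   effects.  */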
3725 /* If we have two nested selects that are inverses of each
3726 other, replace them with the source operand. */
3727 if (GET_CODE (trueop0) == VEC_SELECT
3728 && GET_MODE (XEXP (trueop0, 0)) == mode)
3730 rtx op0_subop1 = XEXP (trueop0, 1);
3731 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3732 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3734 /* Apply the outer ordering vector to the inner one. (The inner
3735 ordering vector is expressly permitted to be of a different
3736 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3737 then the two VEC_SELECTs cancel. */
3738 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3740 rtx x = XVECEXP (trueop1, 0, i);
3741 if (!CONST_INT_P (x))
3742 return 0;
3743 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3744 if (!CONST_INT_P (y) || i != INTVAL (y))
3745 return 0;
3747 return XEXP (trueop0, 0);
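/* For example, (vec_select:V4SI (vec_select:V4SI X (parallel [3 2 1 0]))
   (parallel [3 2 1 0])) applies the reversal twice, so the two selects
   cancel and the result is X.  */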
3750 return 0;
3751 case VEC_CONCAT:
3753 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3754 ? GET_MODE (trueop0)
3755 : GET_MODE_INNER (mode));
3756 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3757 ? GET_MODE (trueop1)
3758 : GET_MODE_INNER (mode));
3760 gcc_assert (VECTOR_MODE_P (mode));
3761 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3762 == GET_MODE_SIZE (mode));
3764 if (VECTOR_MODE_P (op0_mode))
3765 gcc_assert (GET_MODE_INNER (mode)
3766 == GET_MODE_INNER (op0_mode));
3767 else
3768 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3770 if (VECTOR_MODE_P (op1_mode))
3771 gcc_assert (GET_MODE_INNER (mode)
3772 == GET_MODE_INNER (op1_mode));
3773 else
3774 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3776 if ((GET_CODE (trueop0) == CONST_VECTOR
3777 || CONST_SCALAR_INT_P (trueop0)
3778 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3779 && (GET_CODE (trueop1) == CONST_VECTOR
3780 || CONST_SCALAR_INT_P (trueop1)
3781 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3783 int elt_size = GET_MODE_UNIT_SIZE (mode);
3784 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3785 rtvec v = rtvec_alloc (n_elts);
3786 unsigned int i;
3787 unsigned in_n_elts = 1;
3789 if (VECTOR_MODE_P (op0_mode))
3790 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3791 for (i = 0; i < n_elts; i++)
3793 if (i < in_n_elts)
3795 if (!VECTOR_MODE_P (op0_mode))
3796 RTVEC_ELT (v, i) = trueop0;
3797 else
3798 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3800 else
3802 if (!VECTOR_MODE_P (op1_mode))
3803 RTVEC_ELT (v, i) = trueop1;
3804 else
3805 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3806 i - in_n_elts);
3810 return gen_rtx_CONST_VECTOR (mode, v);
3813 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3814 Restrict the transformation to avoid generating a VEC_SELECT with a
3815 mode unrelated to its operand. */
3816 if (GET_CODE (trueop0) == VEC_SELECT
3817 && GET_CODE (trueop1) == VEC_SELECT
3818 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3819 && GET_MODE (XEXP (trueop0, 0)) == mode)
3821 rtx par0 = XEXP (trueop0, 1);
3822 rtx par1 = XEXP (trueop1, 1);
3823 int len0 = XVECLEN (par0, 0);
3824 int len1 = XVECLEN (par1, 0);
3825 rtvec vec = rtvec_alloc (len0 + len1);
3826 for (int i = 0; i < len0; i++)
3827 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3828 for (int i = 0; i < len1; i++)
3829 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3830 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3831 gen_rtx_PARALLEL (VOIDmode, vec));
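/* For example, (vec_concat:V4SI (vec_select:V2SI X:V4SI (parallel [0 1]))
   (vec_select:V2SI X (parallel [3 2]))) is merged into
   (vec_select:V4SI X (parallel [0 1 3 2])).  */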
3834 return 0;
3836 default:
3837 gcc_unreachable ();
3840 return 0;
3844 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3845 rtx op0, rtx op1)
3847 unsigned int width = GET_MODE_PRECISION (mode);
3849 if (VECTOR_MODE_P (mode)
3850 && code != VEC_CONCAT
3851 && GET_CODE (op0) == CONST_VECTOR
3852 && GET_CODE (op1) == CONST_VECTOR)
3854 unsigned n_elts = GET_MODE_NUNITS (mode);
3855 machine_mode op0mode = GET_MODE (op0);
3856 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3857 machine_mode op1mode = GET_MODE (op1);
3858 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3859 rtvec v = rtvec_alloc (n_elts);
3860 unsigned int i;
3862 gcc_assert (op0_n_elts == n_elts);
3863 gcc_assert (op1_n_elts == n_elts);
3864 for (i = 0; i < n_elts; i++)
3866 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3867 CONST_VECTOR_ELT (op0, i),
3868 CONST_VECTOR_ELT (op1, i));
3869 if (!x)
3870 return 0;
3871 RTVEC_ELT (v, i) = x;
3874 return gen_rtx_CONST_VECTOR (mode, v);
3877 if (VECTOR_MODE_P (mode)
3878 && code == VEC_CONCAT
3879 && (CONST_SCALAR_INT_P (op0)
3880 || GET_CODE (op0) == CONST_FIXED
3881 || CONST_DOUBLE_AS_FLOAT_P (op0))
3882 && (CONST_SCALAR_INT_P (op1)
3883 || CONST_DOUBLE_AS_FLOAT_P (op1)
3884 || GET_CODE (op1) == CONST_FIXED))
3886 unsigned n_elts = GET_MODE_NUNITS (mode);
3887 rtvec v = rtvec_alloc (n_elts);
3889 gcc_assert (n_elts >= 2);
3890 if (n_elts == 2)
3892 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3893 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3895 RTVEC_ELT (v, 0) = op0;
3896 RTVEC_ELT (v, 1) = op1;
3898 else
3900 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3901 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3902 unsigned i;
3904 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3905 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3906 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3908 for (i = 0; i < op0_n_elts; ++i)
3909 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3910 for (i = 0; i < op1_n_elts; ++i)
3911 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3914 return gen_rtx_CONST_VECTOR (mode, v);
3917 if (SCALAR_FLOAT_MODE_P (mode)
3918 && CONST_DOUBLE_AS_FLOAT_P (op0)
3919 && CONST_DOUBLE_AS_FLOAT_P (op1)
3920 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3922 if (code == AND
3923 || code == IOR
3924 || code == XOR)
3926 long tmp0[4];
3927 long tmp1[4];
3928 REAL_VALUE_TYPE r;
3929 int i;
3931 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3932 GET_MODE (op0));
3933 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3934 GET_MODE (op1));
3935 for (i = 0; i < 4; i++)
3937 switch (code)
3939 case AND:
3940 tmp0[i] &= tmp1[i];
3941 break;
3942 case IOR:
3943 tmp0[i] |= tmp1[i];
3944 break;
3945 case XOR:
3946 tmp0[i] ^= tmp1[i];
3947 break;
3948 default:
3949 gcc_unreachable ();
3952 real_from_target (&r, tmp0, mode);
3953 return const_double_from_real_value (r, mode);
3955 else
3957 REAL_VALUE_TYPE f0, f1, value, result;
3958 const REAL_VALUE_TYPE *opr0, *opr1;
3959 bool inexact;
3961 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3962 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3964 if (HONOR_SNANS (mode)
3965 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3966 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3967 return 0;
3969 real_convert (&f0, mode, opr0);
3970 real_convert (&f1, mode, opr1);
3972 if (code == DIV
3973 && real_equal (&f1, &dconst0)
3974 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3975 return 0;
3977 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3978 && flag_trapping_math
3979 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3981 int s0 = REAL_VALUE_NEGATIVE (f0);
3982 int s1 = REAL_VALUE_NEGATIVE (f1);
3984 switch (code)
3986 case PLUS:
3987 /* Inf + -Inf = NaN plus exception. */
3988 if (s0 != s1)
3989 return 0;
3990 break;
3991 case MINUS:
3992 /* Inf - Inf = NaN plus exception. */
3993 if (s0 == s1)
3994 return 0;
3995 break;
3996 case DIV:
3997 /* Inf / Inf = NaN plus exception. */
3998 return 0;
3999 default:
4000 break;
4004 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4005 && flag_trapping_math
4006 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4007 || (REAL_VALUE_ISINF (f1)
4008 && real_equal (&f0, &dconst0))))
4009 /* Inf * 0 = NaN plus exception. */
4010 return 0;
4012 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4013 &f0, &f1);
4014 real_convert (&result, mode, &value);
4016 /* Don't constant fold this floating point operation if
4017 the result has overflowed and flag_trapping_math is set. */
4019 if (flag_trapping_math
4020 && MODE_HAS_INFINITIES (mode)
4021 && REAL_VALUE_ISINF (result)
4022 && !REAL_VALUE_ISINF (f0)
4023 && !REAL_VALUE_ISINF (f1))
4024 /* Overflow plus exception. */
4025 return 0;
4027 /* Don't constant fold this floating point operation if the
4028 result may depend upon the run-time rounding mode and
4029 flag_rounding_math is set, or if GCC's software emulation
4030 is unable to accurately represent the result. */
4032 if ((flag_rounding_math
4033 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4034 && (inexact || !real_identical (&result, &value)))
4035 return NULL_RTX;
4037 return const_double_from_real_value (result, mode);
4041 /* We can fold some multi-word operations. */
4042 if ((GET_MODE_CLASS (mode) == MODE_INT
4043 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4044 && CONST_SCALAR_INT_P (op0)
4045 && CONST_SCALAR_INT_P (op1))
4047 wide_int result;
4048 bool overflow;
4049 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4050 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4052 #if TARGET_SUPPORTS_WIDE_INT == 0
4053 /* This assert keeps the simplification from producing a result
4054 that cannot be represented in a CONST_DOUBLE but a lot of
4055 upstream callers expect that this function never fails to
4056 simplify something, and so if you added this to the test
4057 above, the code would die later anyway. If this assert
4058 happens, you just need to make the port support wide int. */
4059 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4060 #endif
4061 switch (code)
4063 case MINUS:
4064 result = wi::sub (pop0, pop1);
4065 break;
4067 case PLUS:
4068 result = wi::add (pop0, pop1);
4069 break;
4071 case MULT:
4072 result = wi::mul (pop0, pop1);
4073 break;
4075 case DIV:
4076 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4077 if (overflow)
4078 return NULL_RTX;
4079 break;
4081 case MOD:
4082 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4083 if (overflow)
4084 return NULL_RTX;
4085 break;
4087 case UDIV:
4088 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4089 if (overflow)
4090 return NULL_RTX;
4091 break;
4093 case UMOD:
4094 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4095 if (overflow)
4096 return NULL_RTX;
4097 break;
4099 case AND:
4100 result = wi::bit_and (pop0, pop1);
4101 break;
4103 case IOR:
4104 result = wi::bit_or (pop0, pop1);
4105 break;
4107 case XOR:
4108 result = wi::bit_xor (pop0, pop1);
4109 break;
4111 case SMIN:
4112 result = wi::smin (pop0, pop1);
4113 break;
4115 case SMAX:
4116 result = wi::smax (pop0, pop1);
4117 break;
4119 case UMIN:
4120 result = wi::umin (pop0, pop1);
4121 break;
4123 case UMAX:
4124 result = wi::umax (pop0, pop1);
4125 break;
4127 case LSHIFTRT:
4128 case ASHIFTRT:
4129 case ASHIFT:
4131 wide_int wop1 = pop1;
4132 if (SHIFT_COUNT_TRUNCATED)
4133 wop1 = wi::umod_trunc (wop1, width);
4134 else if (wi::geu_p (wop1, width))
4135 return NULL_RTX;
4137 switch (code)
4139 case LSHIFTRT:
4140 result = wi::lrshift (pop0, wop1);
4141 break;
4143 case ASHIFTRT:
4144 result = wi::arshift (pop0, wop1);
4145 break;
4147 case ASHIFT:
4148 result = wi::lshift (pop0, wop1);
4149 break;
4151 default:
4152 gcc_unreachable ();
4154 break;
4156 case ROTATE:
4157 case ROTATERT:
4159 if (wi::neg_p (pop1))
4160 return NULL_RTX;
4162 switch (code)
4164 case ROTATE:
4165 result = wi::lrotate (pop0, pop1);
4166 break;
4168 case ROTATERT:
4169 result = wi::rrotate (pop0, pop1);
4170 break;
4172 default:
4173 gcc_unreachable ();
4175 break;
4177 default:
4178 return NULL_RTX;
4180 return immed_wide_int_const (result, mode);
4183 return NULL_RTX;
4188 /* Return a positive integer if X should sort after Y. The value
4189 returned is 1 if and only if X and Y are both regs. */
4191 static int
4192 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4194 int result;
4196 result = (commutative_operand_precedence (y)
4197 - commutative_operand_precedence (x));
4198 if (result)
4199 return result + result;
4201 /* Group together equal REGs to do more simplification. */
4202 if (REG_P (x) && REG_P (y))
4203 return REGNO (x) > REGNO (y);
4205 return 0;
4208 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4209 operands may be another PLUS or MINUS.
4211 Rather than test for specific cases, we do this by a brute-force method
4212 and do all possible simplifications until no more changes occur. Then
4213 we rebuild the operation.
4215 May return NULL_RTX when no changes were made. */
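/* For example, (minus (plus A B) (minus C D)) is first flattened into the
   operand list { +A, +B, -C, +D }; the pairs are then combined and sorted,
   and the result is rebuilt from whatever operands remain.  */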
4217 static rtx
4218 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4219 rtx op1)
4221 struct simplify_plus_minus_op_data
4223 rtx op;
4224 short neg;
4225 } ops[16];
4226 rtx result, tem;
4227 int n_ops = 2;
4228 int changed, n_constants, canonicalized = 0;
4229 int i, j;
4231 memset (ops, 0, sizeof ops);
4233 /* Set up the two operands and then expand them until nothing has been
4234 changed. If we run out of room in our array, give up; this should
4235 almost never happen. */
4237 ops[0].op = op0;
4238 ops[0].neg = 0;
4239 ops[1].op = op1;
4240 ops[1].neg = (code == MINUS);
4244 changed = 0;
4245 n_constants = 0;
4247 for (i = 0; i < n_ops; i++)
4249 rtx this_op = ops[i].op;
4250 int this_neg = ops[i].neg;
4251 enum rtx_code this_code = GET_CODE (this_op);
4253 switch (this_code)
4255 case PLUS:
4256 case MINUS:
4257 if (n_ops == ARRAY_SIZE (ops))
4258 return NULL_RTX;
4260 ops[n_ops].op = XEXP (this_op, 1);
4261 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4262 n_ops++;
4264 ops[i].op = XEXP (this_op, 0);
4265 changed = 1;
4266 /* If this operand was negated then we will potentially
4267 canonicalize the expression. Similarly if we don't
4268 place the operands adjacent we're re-ordering the
4269 expression and thus might be performing a
4270 canonicalization. Ignore register re-ordering.
4271 ??? It might be better to shuffle the ops array here,
4272 but then (plus (plus (A, B), plus (C, D))) wouldn't
4273 be seen as non-canonical. */
4274 if (this_neg
4275 || (i != n_ops - 2
4276 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4277 canonicalized = 1;
4278 break;
4280 case NEG:
4281 ops[i].op = XEXP (this_op, 0);
4282 ops[i].neg = ! this_neg;
4283 changed = 1;
4284 canonicalized = 1;
4285 break;
4287 case CONST:
4288 if (n_ops != ARRAY_SIZE (ops)
4289 && GET_CODE (XEXP (this_op, 0)) == PLUS
4290 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4291 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4293 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4294 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4295 ops[n_ops].neg = this_neg;
4296 n_ops++;
4297 changed = 1;
4298 canonicalized = 1;
4300 break;
4302 case NOT:
4303 /* ~a -> (-a - 1) */
4304 if (n_ops != ARRAY_SIZE (ops))
4306 ops[n_ops].op = CONSTM1_RTX (mode);
4307 ops[n_ops++].neg = this_neg;
4308 ops[i].op = XEXP (this_op, 0);
4309 ops[i].neg = !this_neg;
4310 changed = 1;
4311 canonicalized = 1;
4313 break;
4315 case CONST_INT:
4316 n_constants++;
4317 if (this_neg)
4319 ops[i].op = neg_const_int (mode, this_op);
4320 ops[i].neg = 0;
4321 changed = 1;
4322 canonicalized = 1;
4324 break;
4326 default:
4327 break;
4331 while (changed);
4333 if (n_constants > 1)
4334 canonicalized = 1;
4336 gcc_assert (n_ops >= 2);
4338 /* If we only have two operands, we can avoid the loops. */
4339 if (n_ops == 2)
4341 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4342 rtx lhs, rhs;
4344 /* Get the two operands. Be careful with the order, especially for
4345 the cases where code == MINUS. */
4346 if (ops[0].neg && ops[1].neg)
4348 lhs = gen_rtx_NEG (mode, ops[0].op);
4349 rhs = ops[1].op;
4351 else if (ops[0].neg)
4353 lhs = ops[1].op;
4354 rhs = ops[0].op;
4356 else
4358 lhs = ops[0].op;
4359 rhs = ops[1].op;
4362 return simplify_const_binary_operation (code, mode, lhs, rhs);
4365 /* Now simplify each pair of operands until nothing changes. */
4366 while (1)
4368 /* Insertion sort is good enough for a small array. */
4369 for (i = 1; i < n_ops; i++)
4371 struct simplify_plus_minus_op_data save;
4372 int cmp;
4374 j = i - 1;
4375 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4376 if (cmp <= 0)
4377 continue;
4378 /* Just swapping registers doesn't count as canonicalization. */
4379 if (cmp != 1)
4380 canonicalized = 1;
4382 save = ops[i];
4384 ops[j + 1] = ops[j];
4385 while (j--
4386 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4387 ops[j + 1] = save;
4390 changed = 0;
4391 for (i = n_ops - 1; i > 0; i--)
4392 for (j = i - 1; j >= 0; j--)
4394 rtx lhs = ops[j].op, rhs = ops[i].op;
4395 int lneg = ops[j].neg, rneg = ops[i].neg;
4397 if (lhs != 0 && rhs != 0)
4399 enum rtx_code ncode = PLUS;
4401 if (lneg != rneg)
4403 ncode = MINUS;
4404 if (lneg)
4405 std::swap (lhs, rhs);
4407 else if (swap_commutative_operands_p (lhs, rhs))
4408 std::swap (lhs, rhs);
4410 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4411 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4413 rtx tem_lhs, tem_rhs;
4415 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4416 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4417 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4418 tem_rhs);
4420 if (tem && !CONSTANT_P (tem))
4421 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4423 else
4424 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4426 if (tem)
4428 /* Reject "simplifications" that just wrap the two
4429 arguments in a CONST. Failure to do so can result
4430 in infinite recursion with simplify_binary_operation
4431 when it calls us to simplify CONST operations.
4432 Also, if we find such a simplification, don't try
4433 any more combinations with this rhs: We must have
4434 something like symbol+offset, i.e. one of the
4435 trivial CONST expressions we handle later. */
4436 if (GET_CODE (tem) == CONST
4437 && GET_CODE (XEXP (tem, 0)) == ncode
4438 && XEXP (XEXP (tem, 0), 0) == lhs
4439 && XEXP (XEXP (tem, 0), 1) == rhs)
4440 break;
4441 lneg &= rneg;
4442 if (GET_CODE (tem) == NEG)
4443 tem = XEXP (tem, 0), lneg = !lneg;
4444 if (CONST_INT_P (tem) && lneg)
4445 tem = neg_const_int (mode, tem), lneg = 0;
4447 ops[i].op = tem;
4448 ops[i].neg = lneg;
4449 ops[j].op = NULL_RTX;
4450 changed = 1;
4451 canonicalized = 1;
4456 if (!changed)
4457 break;
4459 /* Pack all the operands to the lower-numbered entries. */
4460 for (i = 0, j = 0; j < n_ops; j++)
4461 if (ops[j].op)
4463 ops[i] = ops[j];
4464 i++;
4466 n_ops = i;
4469 /* If nothing changed, check that rematerialization of rtl instructions
4470 is still required. */
4471 if (!canonicalized)
4473 /* Perform rematerialization only if all operands are registers and
4474 all operations are PLUS. */
4475 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4476 around rs6000 and how it uses the CA register. See PR67145. */
4477 for (i = 0; i < n_ops; i++)
4478 if (ops[i].neg
4479 || !REG_P (ops[i].op)
4480 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4481 && fixed_regs[REGNO (ops[i].op)]
4482 && !global_regs[REGNO (ops[i].op)]
4483 && ops[i].op != frame_pointer_rtx
4484 && ops[i].op != arg_pointer_rtx
4485 && ops[i].op != stack_pointer_rtx))
4486 return NULL_RTX;
4487 goto gen_result;
4490 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4491 if (n_ops == 2
4492 && CONST_INT_P (ops[1].op)
4493 && CONSTANT_P (ops[0].op)
4494 && ops[0].neg)
4495 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4497 /* We suppressed creation of trivial CONST expressions in the
4498 combination loop to avoid recursion. Create one manually now.
4499 The combination loop should have ensured that there is exactly
4500 one CONST_INT, and the sort will have ensured that it is last
4501 in the array and that any other constant will be next-to-last. */
4503 if (n_ops > 1
4504 && CONST_INT_P (ops[n_ops - 1].op)
4505 && CONSTANT_P (ops[n_ops - 2].op))
4507 rtx value = ops[n_ops - 1].op;
4508 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4509 value = neg_const_int (mode, value);
4510 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4511 INTVAL (value));
4512 n_ops--;
4515 /* Put a non-negated operand first, if possible. */
4517 for (i = 0; i < n_ops && ops[i].neg; i++)
4518 continue;
4519 if (i == n_ops)
4520 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4521 else if (i != 0)
4523 tem = ops[0].op;
4524 ops[0] = ops[i];
4525 ops[i].op = tem;
4526 ops[i].neg = 1;
4529 /* Now make the result by performing the requested operations. */
4530 gen_result:
4531 result = ops[0].op;
4532 for (i = 1; i < n_ops; i++)
4533 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4534 mode, result, ops[i].op);
4536 return result;
4539 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4540 static bool
4541 plus_minus_operand_p (const_rtx x)
4543 return GET_CODE (x) == PLUS
4544 || GET_CODE (x) == MINUS
4545 || (GET_CODE (x) == CONST
4546 && GET_CODE (XEXP (x, 0)) == PLUS
4547 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4548 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4551 /* Like simplify_binary_operation except used for relational operators.
4552 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4553 not both be VOIDmode as well.
4555 CMP_MODE specifies the mode in which the comparison is done, so it is
4556 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4557 the operands or, if both are VOIDmode, the operands are compared in
4558 "infinite precision". */
4560 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4561 machine_mode cmp_mode, rtx op0, rtx op1)
4563 rtx tem, trueop0, trueop1;
4565 if (cmp_mode == VOIDmode)
4566 cmp_mode = GET_MODE (op0);
4567 if (cmp_mode == VOIDmode)
4568 cmp_mode = GET_MODE (op1);
4570 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4571 if (tem)
4573 if (SCALAR_FLOAT_MODE_P (mode))
4575 if (tem == const0_rtx)
4576 return CONST0_RTX (mode);
4577 #ifdef FLOAT_STORE_FLAG_VALUE
4579 REAL_VALUE_TYPE val;
4580 val = FLOAT_STORE_FLAG_VALUE (mode);
4581 return const_double_from_real_value (val, mode);
4583 #else
4584 return NULL_RTX;
4585 #endif
4587 if (VECTOR_MODE_P (mode))
4589 if (tem == const0_rtx)
4590 return CONST0_RTX (mode);
4591 #ifdef VECTOR_STORE_FLAG_VALUE
4593 int i, units;
4594 rtvec v;
4596 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4597 if (val == NULL_RTX)
4598 return NULL_RTX;
4599 if (val == const1_rtx)
4600 return CONST1_RTX (mode);
4602 units = GET_MODE_NUNITS (mode);
4603 v = rtvec_alloc (units);
4604 for (i = 0; i < units; i++)
4605 RTVEC_ELT (v, i) = val;
4606 return gen_rtx_raw_CONST_VECTOR (mode, v);
4608 #else
4609 return NULL_RTX;
4610 #endif
4613 return tem;
4616 /* For the following tests, ensure const0_rtx is op1. */
4617 if (swap_commutative_operands_p (op0, op1)
4618 || (op0 == const0_rtx && op1 != const0_rtx))
4619 std::swap (op0, op1), code = swap_condition (code);
4621 /* If op0 is a compare, extract the comparison arguments from it. */
4622 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4623 return simplify_gen_relational (code, mode, VOIDmode,
4624 XEXP (op0, 0), XEXP (op0, 1));
4626 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4627 || CC0_P (op0))
4628 return NULL_RTX;
4630 trueop0 = avoid_constant_pool_reference (op0);
4631 trueop1 = avoid_constant_pool_reference (op1);
4632 return simplify_relational_operation_1 (code, mode, cmp_mode,
4633 trueop0, trueop1);
4636 /* This part of simplify_relational_operation is only used when CMP_MODE
4637 is not in class MODE_CC (i.e. it is a real comparison).
4639 MODE is the mode of the result, while CMP_MODE specifies the mode
4640 in which the comparison is done, so it is the mode of the operands. */
4642 static rtx
4643 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4644 machine_mode cmp_mode, rtx op0, rtx op1)
4646 enum rtx_code op0code = GET_CODE (op0);
4648 if (op1 == const0_rtx && COMPARISON_P (op0))
4650 /* If op0 is a comparison, extract the comparison arguments
4651 from it. */
4652 if (code == NE)
4654 if (GET_MODE (op0) == mode)
4655 return simplify_rtx (op0);
4656 else
4657 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4658 XEXP (op0, 0), XEXP (op0, 1));
4660 else if (code == EQ)
4662 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4663 if (new_code != UNKNOWN)
4664 return simplify_gen_relational (new_code, mode, VOIDmode,
4665 XEXP (op0, 0), XEXP (op0, 1));
4669 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4670 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4671 if ((code == LTU || code == GEU)
4672 && GET_CODE (op0) == PLUS
4673 && CONST_INT_P (XEXP (op0, 1))
4674 && (rtx_equal_p (op1, XEXP (op0, 0))
4675 || rtx_equal_p (op1, XEXP (op0, 1)))
4676 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4677 && XEXP (op0, 1) != const0_rtx)
4679 rtx new_cmp
4680 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4681 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4682 cmp_mode, XEXP (op0, 0), new_cmp);
4685 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4686 transformed into (LTU a -C). */
4687 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4688 && CONST_INT_P (XEXP (op0, 1))
4689 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4690 && XEXP (op0, 1) != const0_rtx)
4692 rtx new_cmp
4693 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4694 return simplify_gen_relational (LTU, mode, cmp_mode,
4695 XEXP (op0, 0), new_cmp);
4698 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4699 if ((code == LTU || code == GEU)
4700 && GET_CODE (op0) == PLUS
4701 && rtx_equal_p (op1, XEXP (op0, 1))
4702 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4703 && !rtx_equal_p (op1, XEXP (op0, 0)))
4704 return simplify_gen_relational (code, mode, cmp_mode, op0,
4705 copy_rtx (XEXP (op0, 0)));
4707 if (op1 == const0_rtx)
4709 /* Canonicalize (GTU x 0) as (NE x 0). */
4710 if (code == GTU)
4711 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4712 /* Canonicalize (LEU x 0) as (EQ x 0). */
4713 if (code == LEU)
4714 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4716 else if (op1 == const1_rtx)
4718 switch (code)
4720 case GE:
4721 /* Canonicalize (GE x 1) as (GT x 0). */
4722 return simplify_gen_relational (GT, mode, cmp_mode,
4723 op0, const0_rtx);
4724 case GEU:
4725 /* Canonicalize (GEU x 1) as (NE x 0). */
4726 return simplify_gen_relational (NE, mode, cmp_mode,
4727 op0, const0_rtx);
4728 case LT:
4729 /* Canonicalize (LT x 1) as (LE x 0). */
4730 return simplify_gen_relational (LE, mode, cmp_mode,
4731 op0, const0_rtx);
4732 case LTU:
4733 /* Canonicalize (LTU x 1) as (EQ x 0). */
4734 return simplify_gen_relational (EQ, mode, cmp_mode,
4735 op0, const0_rtx);
4736 default:
4737 break;
4740 else if (op1 == constm1_rtx)
4742 /* Canonicalize (LE x -1) as (LT x 0). */
4743 if (code == LE)
4744 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4745 /* Canonicalize (GT x -1) as (GE x 0). */
4746 if (code == GT)
4747 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4750 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4751 if ((code == EQ || code == NE)
4752 && (op0code == PLUS || op0code == MINUS)
4753 && CONSTANT_P (op1)
4754 && CONSTANT_P (XEXP (op0, 1))
4755 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4757 rtx x = XEXP (op0, 0);
4758 rtx c = XEXP (op0, 1);
4759 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4760 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4762 /* Detect an infinite recursion, where this simplification would
4763 oscillate between:
4764 A + B == C <---> C - B == A,
4765 where A, B, and C are all constants with non-simplifiable expressions,
4766 usually SYMBOL_REFs. */
4767 if (GET_CODE (tem) == invcode
4768 && CONSTANT_P (x)
4769 && rtx_equal_p (c, XEXP (tem, 1)))
4770 return NULL_RTX;
4772 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
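/* For example, (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)) by folding 10 - 4 on the constant side.  */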
4775 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4776 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4777 if (code == NE
4778 && op1 == const0_rtx
4779 && GET_MODE_CLASS (mode) == MODE_INT
4780 && cmp_mode != VOIDmode
4781 /* ??? Work-around BImode bugs in the ia64 backend. */
4782 && mode != BImode
4783 && cmp_mode != BImode
4784 && nonzero_bits (op0, cmp_mode) == 1
4785 && STORE_FLAG_VALUE == 1)
4786 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4787 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4788 : lowpart_subreg (mode, op0, cmp_mode);
4790 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4791 if ((code == EQ || code == NE)
4792 && op1 == const0_rtx
4793 && op0code == XOR)
4794 return simplify_gen_relational (code, mode, cmp_mode,
4795 XEXP (op0, 0), XEXP (op0, 1));
4797 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4798 if ((code == EQ || code == NE)
4799 && op0code == XOR
4800 && rtx_equal_p (XEXP (op0, 0), op1)
4801 && !side_effects_p (XEXP (op0, 0)))
4802 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4803 CONST0_RTX (mode));
4805 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4806 if ((code == EQ || code == NE)
4807 && op0code == XOR
4808 && rtx_equal_p (XEXP (op0, 1), op1)
4809 && !side_effects_p (XEXP (op0, 1)))
4810 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4811 CONST0_RTX (mode));
4813 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4814 if ((code == EQ || code == NE)
4815 && op0code == XOR
4816 && CONST_SCALAR_INT_P (op1)
4817 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4818 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4819 simplify_gen_binary (XOR, cmp_mode,
4820 XEXP (op0, 1), op1));
4822 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4823 can be implemented with a BICS instruction on some targets, or
4824 constant-folded if y is a constant. */
4825 if ((code == EQ || code == NE)
4826 && op0code == AND
4827 && rtx_equal_p (XEXP (op0, 0), op1)
4828 && !side_effects_p (op1)
4829 && op1 != CONST0_RTX (cmp_mode))
4831 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4832 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4834 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4835 CONST0_RTX (cmp_mode));
4838 /* Likewise for (eq/ne (and x y) y). */
4839 if ((code == EQ || code == NE)
4840 && op0code == AND
4841 && rtx_equal_p (XEXP (op0, 1), op1)
4842 && !side_effects_p (op1)
4843 && op1 != CONST0_RTX (cmp_mode))
4845 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4846 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4848 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4849 CONST0_RTX (cmp_mode));
4852 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4853 if ((code == EQ || code == NE)
4854 && GET_CODE (op0) == BSWAP
4855 && CONST_SCALAR_INT_P (op1))
4856 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4857 simplify_gen_unary (BSWAP, cmp_mode,
4858 op1, cmp_mode));
4860 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4861 if ((code == EQ || code == NE)
4862 && GET_CODE (op0) == BSWAP
4863 && GET_CODE (op1) == BSWAP)
4864 return simplify_gen_relational (code, mode, cmp_mode,
4865 XEXP (op0, 0), XEXP (op1, 0));
4867 if (op0code == POPCOUNT && op1 == const0_rtx)
4868 switch (code)
4870 case EQ:
4871 case LE:
4872 case LEU:
4873 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4874 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4875 XEXP (op0, 0), const0_rtx);
4877 case NE:
4878 case GT:
4879 case GTU:
4880 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4881 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4882 XEXP (op0, 0), const0_rtx);
4884 default:
4885 break;
4888 return NULL_RTX;
4891 enum
4893 CMP_EQ = 1,
4894 CMP_LT = 2,
4895 CMP_GT = 4,
4896 CMP_LTU = 8,
4897 CMP_GTU = 16
4901 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4902 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4903 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4904 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4905 For floating-point comparisons, assume that the operands were ordered. */
4907 static rtx
4908 comparison_result (enum rtx_code code, int known_results)
4910 switch (code)
4912 case EQ:
4913 case UNEQ:
4914 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4915 case NE:
4916 case LTGT:
4917 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4919 case LT:
4920 case UNLT:
4921 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4922 case GE:
4923 case UNGE:
4924 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4926 case GT:
4927 case UNGT:
4928 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4929 case LE:
4930 case UNLE:
4931 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4933 case LTU:
4934 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4935 case GEU:
4936 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4938 case GTU:
4939 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4940 case LEU:
4941 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4943 case ORDERED:
4944 return const_true_rtx;
4945 case UNORDERED:
4946 return const0_rtx;
4947 default:
4948 gcc_unreachable ();
4952 /* Check if the given comparison (done in the given MODE) is actually
4953 a tautology or a contradiction. If the mode is VOIDmode, the
4954 comparison is done in "infinite precision". If no simplification
4955 is possible, this function returns zero. Otherwise, it returns
4956 either const_true_rtx or const0_rtx. */
4959 simplify_const_relational_operation (enum rtx_code code,
4960 machine_mode mode,
4961 rtx op0, rtx op1)
4963 rtx tem;
4964 rtx trueop0;
4965 rtx trueop1;
4967 gcc_assert (mode != VOIDmode
4968 || (GET_MODE (op0) == VOIDmode
4969 && GET_MODE (op1) == VOIDmode));
4971 /* If op0 is a compare, extract the comparison arguments from it. */
4972 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4974 op1 = XEXP (op0, 1);
4975 op0 = XEXP (op0, 0);
4977 if (GET_MODE (op0) != VOIDmode)
4978 mode = GET_MODE (op0);
4979 else if (GET_MODE (op1) != VOIDmode)
4980 mode = GET_MODE (op1);
4981 else
4982 return 0;
4985 /* We can't simplify MODE_CC values since we don't know what the
4986 actual comparison is. */
4987 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4988 return 0;
4990 /* Make sure the constant is second. */
4991 if (swap_commutative_operands_p (op0, op1))
4993 std::swap (op0, op1);
4994 code = swap_condition (code);
4997 trueop0 = avoid_constant_pool_reference (op0);
4998 trueop1 = avoid_constant_pool_reference (op1);
5000 /* For integer comparisons of A and B maybe we can simplify A - B and can
5001 then simplify a comparison of that with zero. If A and B are both either
5002 a register or a CONST_INT, this can't help; testing for these cases will
5003 prevent infinite recursion here and speed things up.
5005 We can only do this for EQ and NE comparisons as otherwise we may
5006 lose or introduce overflow which we cannot disregard as undefined, since
5007 we do not know the signedness of the operation on either the left or
5008 the right hand side of the comparison. */
5010 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5011 && (code == EQ || code == NE)
5012 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5013 && (REG_P (op1) || CONST_INT_P (trueop1)))
5014 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5015 /* We cannot do this if tem is a nonzero address. */
5016 && ! nonzero_address_p (tem))
5017 return simplify_const_relational_operation (signed_condition (code),
5018 mode, tem, const0_rtx);
5020 if (! HONOR_NANS (mode) && code == ORDERED)
5021 return const_true_rtx;
5023 if (! HONOR_NANS (mode) && code == UNORDERED)
5024 return const0_rtx;
5026 /* For modes without NaNs, if the two operands are equal, we know the
5027 result except if they have side-effects. Even with NaNs we know
5028 the result of unordered comparisons and, if signaling NaNs are
5029 irrelevant, also the result of LT/GT/LTGT. */
5030 if ((! HONOR_NANS (trueop0)
5031 || code == UNEQ || code == UNLE || code == UNGE
5032 || ((code == LT || code == GT || code == LTGT)
5033 && ! HONOR_SNANS (trueop0)))
5034 && rtx_equal_p (trueop0, trueop1)
5035 && ! side_effects_p (trueop0))
5036 return comparison_result (code, CMP_EQ);
5038 /* If the operands are floating-point constants, see if we can fold
5039 the result. */
5040 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5041 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5042 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5044 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5045 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5047 /* Comparisons are unordered iff at least one of the values is NaN. */
5048 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5049 switch (code)
5051 case UNEQ:
5052 case UNLT:
5053 case UNGT:
5054 case UNLE:
5055 case UNGE:
5056 case NE:
5057 case UNORDERED:
5058 return const_true_rtx;
5059 case EQ:
5060 case LT:
5061 case GT:
5062 case LE:
5063 case GE:
5064 case LTGT:
5065 case ORDERED:
5066 return const0_rtx;
5067 default:
5068 return 0;
5071 return comparison_result (code,
5072 (real_equal (d0, d1) ? CMP_EQ :
5073 real_less (d0, d1) ? CMP_LT : CMP_GT));
5076 /* Otherwise, see if the operands are both integers. */
5077 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5078 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5080 /* It would be nice if we really had a mode here. However, the
5081 largest int representable on the target is as good as
5082 infinite. */
5083 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5084 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5085 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5087 if (wi::eq_p (ptrueop0, ptrueop1))
5088 return comparison_result (code, CMP_EQ);
5089 else
5091 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5092 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5093 return comparison_result (code, cr);
5097 /* Optimize comparisons with upper and lower bounds. */
5098 if (HWI_COMPUTABLE_MODE_P (mode)
5099 && CONST_INT_P (trueop1)
5100 && !side_effects_p (trueop0))
5102 int sign;
5103 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5104 HOST_WIDE_INT val = INTVAL (trueop1);
5105 HOST_WIDE_INT mmin, mmax;
5107 if (code == GEU
5108 || code == LEU
5109 || code == GTU
5110 || code == LTU)
5111 sign = 0;
5112 else
5113 sign = 1;
5115 /* Get a reduced range if the sign bit is zero. */
5116 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5118 mmin = 0;
5119 mmax = nonzero;
5121 else
5123 rtx mmin_rtx, mmax_rtx;
5124 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5126 mmin = INTVAL (mmin_rtx);
5127 mmax = INTVAL (mmax_rtx);
5128 if (sign)
5130 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5132 mmin >>= (sign_copies - 1);
5133 mmax >>= (sign_copies - 1);
5137 switch (code)
5139 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5140 case GEU:
5141 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5142 return const_true_rtx;
5143 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5144 return const0_rtx;
5145 break;
5146 case GE:
5147 if (val <= mmin)
5148 return const_true_rtx;
5149 if (val > mmax)
5150 return const0_rtx;
5151 break;
5153 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5154 case LEU:
5155 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5156 return const_true_rtx;
5157 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5158 return const0_rtx;
5159 break;
5160 case LE:
5161 if (val >= mmax)
5162 return const_true_rtx;
5163 if (val < mmin)
5164 return const0_rtx;
5165 break;
5167 case EQ:
5168 /* x == y is always false for y out of range. */
5169 if (val < mmin || val > mmax)
5170 return const0_rtx;
5171 break;
5173 /* x > y is always false for y >= mmax, always true for y < mmin. */
5174 case GTU:
5175 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5176 return const0_rtx;
5177 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5178 return const_true_rtx;
5179 break;
5180 case GT:
5181 if (val >= mmax)
5182 return const0_rtx;
5183 if (val < mmin)
5184 return const_true_rtx;
5185 break;
5187 /* x < y is always false for y <= mmin, always true for y > mmax. */
5188 case LTU:
5189 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5190 return const0_rtx;
5191 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5192 return const_true_rtx;
5193 break;
5194 case LT:
5195 if (val <= mmin)
5196 return const0_rtx;
5197 if (val > mmax)
5198 return const_true_rtx;
5199 break;
5201 case NE:
5202 /* x != y is always true for y out of range. */
5203 if (val < mmin || val > mmax)
5204 return const_true_rtx;
5205 break;
5207 default:
5208 break;
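/* For example, if nonzero_bits shows that TRUEOP0 fits in [0, 15], say it
   is (and:SI x (const_int 15)), then mmin = 0 and mmax = 15, so
   (gtu (and:SI x (const_int 15)) (const_int 15)) folds to const0_rtx and
   the corresponding LEU comparison folds to const_true_rtx.  */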
5212 /* Optimize integer comparisons with zero. */
5213 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5215 /* Some addresses are known to be nonzero. We don't know
5216 their sign, but equality comparisons are known. */
5217 if (nonzero_address_p (trueop0))
5219 if (code == EQ || code == LEU)
5220 return const0_rtx;
5221 if (code == NE || code == GTU)
5222 return const_true_rtx;
5225 /* See if the first operand is an IOR with a constant. If so, we
5226 may be able to determine the result of this comparison. */
5227 if (GET_CODE (op0) == IOR)
5229 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5230 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5232 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5233 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5234 && (UINTVAL (inner_const)
5235 & (HOST_WIDE_INT_1U
5236 << sign_bitnum)));
5238 switch (code)
5240 case EQ:
5241 case LEU:
5242 return const0_rtx;
5243 case NE:
5244 case GTU:
5245 return const_true_rtx;
5246 case LT:
5247 case LE:
5248 if (has_sign)
5249 return const_true_rtx;
5250 break;
5251 case GT:
5252 case GE:
5253 if (has_sign)
5254 return const0_rtx;
5255 break;
5256 default:
5257 break;
5263 /* Optimize comparison of ABS with zero. */
5264 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5265 && (GET_CODE (trueop0) == ABS
5266 || (GET_CODE (trueop0) == FLOAT_EXTEND
5267 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5269 switch (code)
5271 case LT:
5272 /* Optimize abs(x) < 0.0. */
5273 if (!HONOR_SNANS (mode)
5274 && (!INTEGRAL_MODE_P (mode)
5275 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5277 if (INTEGRAL_MODE_P (mode)
5278 && (issue_strict_overflow_warning
5279 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5280 warning (OPT_Wstrict_overflow,
5281 ("assuming signed overflow does not occur when "
5282 "assuming abs (x) < 0 is false"));
5283 return const0_rtx;
5285 break;
5287 case GE:
5288 /* Optimize abs(x) >= 0.0. */
5289 if (!HONOR_NANS (mode)
5290 && (!INTEGRAL_MODE_P (mode)
5291 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5293 if (INTEGRAL_MODE_P (mode)
5294 && (issue_strict_overflow_warning
5295 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5296 warning (OPT_Wstrict_overflow,
5297 ("assuming signed overflow does not occur when "
5298 "assuming abs (x) >= 0 is true"));
5299 return const_true_rtx;
5301 break;
5303 case UNGE:
5304 /* Optimize ! (abs(x) < 0.0). */
5305 return const_true_rtx;
5307 default:
5308 break;
5312 return 0;
5315 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5316 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5317 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5318 can be simplified to that or NULL_RTX if not.
5319 Assume X is compared against zero with CMP_CODE and the true
5320 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5322 static rtx
5323 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5325 if (cmp_code != EQ && cmp_code != NE)
5326 return NULL_RTX;
5328 /* Result on X == 0 and X != 0 respectively. */
5329 rtx on_zero, on_nonzero;
5330 if (cmp_code == EQ)
5332 on_zero = true_val;
5333 on_nonzero = false_val;
5335 else
5337 on_zero = false_val;
5338 on_nonzero = true_val;
5341 rtx_code op_code = GET_CODE (on_nonzero);
5342 if ((op_code != CLZ && op_code != CTZ)
5343 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5344 || !CONST_INT_P (on_zero))
5345 return NULL_RTX;
5347 HOST_WIDE_INT op_val;
5348 if (((op_code == CLZ
5349 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5350 || (op_code == CTZ
5351 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5352 && op_val == INTVAL (on_zero))
5353 return on_nonzero;
5355 return NULL_RTX;
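/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode (a typical choice, used here only for illustration),
   (if_then_else (eq x (const_int 0)) (const_int 32) (clz:SI x))
   simplifies to (clz:SI x).  */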
5359 /* Simplify CODE, an operation with result mode MODE and three operands,
5360 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5361 a constant. Return 0 if no simplification is possible. */
5364 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5365 machine_mode op0_mode, rtx op0, rtx op1,
5366 rtx op2)
5368 unsigned int width = GET_MODE_PRECISION (mode);
5369 bool any_change = false;
5370 rtx tem, trueop2;
5372 /* VOIDmode means "infinite" precision. */
5373 if (width == 0)
5374 width = HOST_BITS_PER_WIDE_INT;
5376 switch (code)
5378 case FMA:
5379 /* Simplify negations around the multiplication. */
5380 /* -a * -b + c => a * b + c. */
5381 if (GET_CODE (op0) == NEG)
5383 tem = simplify_unary_operation (NEG, mode, op1, mode);
5384 if (tem)
5385 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5387 else if (GET_CODE (op1) == NEG)
5389 tem = simplify_unary_operation (NEG, mode, op0, mode);
5390 if (tem)
5391 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5394 /* Canonicalize the two multiplication operands. */
5395 /* a * -b + c => -b * a + c. */
5396 if (swap_commutative_operands_p (op0, op1))
5397 std::swap (op0, op1), any_change = true;
5399 if (any_change)
5400 return gen_rtx_FMA (mode, op0, op1, op2);
5401 return NULL_RTX;
5403 case SIGN_EXTRACT:
5404 case ZERO_EXTRACT:
5405 if (CONST_INT_P (op0)
5406 && CONST_INT_P (op1)
5407 && CONST_INT_P (op2)
5408 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5409 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5411 /* Extracting a bit-field from a constant. */
5412 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5413 HOST_WIDE_INT op1val = INTVAL (op1);
5414 HOST_WIDE_INT op2val = INTVAL (op2);
5415 if (BITS_BIG_ENDIAN)
5416 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5417 else
5418 val >>= op2val;
5420 if (HOST_BITS_PER_WIDE_INT != op1val)
5422 /* First zero-extend. */
5423 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5424 /* If desired, propagate sign bit. */
5425 if (code == SIGN_EXTRACT
5426 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5427 != 0)
5428 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5431 return gen_int_mode (val, mode);
5433 break;
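/* For example, assuming !BITS_BIG_ENDIAN, extracting 3 bits starting at
   bit 2 of the constant 60 (0b111100) gives
   (zero_extract (const_int 60) (const_int 3) (const_int 2)) -> (const_int 7)
   and
   (sign_extract (const_int 60) (const_int 3) (const_int 2)) -> (const_int -1).  */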
5435 case IF_THEN_ELSE:
5436 if (CONST_INT_P (op0))
5437 return op0 != const0_rtx ? op1 : op2;
5439 /* Convert c ? a : a into "a". */
5440 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5441 return op1;
5443 /* Convert a != b ? a : b into "a". */
5444 if (GET_CODE (op0) == NE
5445 && ! side_effects_p (op0)
5446 && ! HONOR_NANS (mode)
5447 && ! HONOR_SIGNED_ZEROS (mode)
5448 && ((rtx_equal_p (XEXP (op0, 0), op1)
5449 && rtx_equal_p (XEXP (op0, 1), op2))
5450 || (rtx_equal_p (XEXP (op0, 0), op2)
5451 && rtx_equal_p (XEXP (op0, 1), op1))))
5452 return op1;
5454 /* Convert a == b ? a : b into "b". */
5455 if (GET_CODE (op0) == EQ
5456 && ! side_effects_p (op0)
5457 && ! HONOR_NANS (mode)
5458 && ! HONOR_SIGNED_ZEROS (mode)
5459 && ((rtx_equal_p (XEXP (op0, 0), op1)
5460 && rtx_equal_p (XEXP (op0, 1), op2))
5461 || (rtx_equal_p (XEXP (op0, 0), op2)
5462 && rtx_equal_p (XEXP (op0, 1), op1))))
5463 return op2;
5465 /* Convert (!c) != {0,...,0} ? a : b into
5466 c != {0,...,0} ? b : a for vector modes. */
5467 if (VECTOR_MODE_P (GET_MODE (op1))
5468 && GET_CODE (op0) == NE
5469 && GET_CODE (XEXP (op0, 0)) == NOT
5470 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5472 rtx cv = XEXP (op0, 1);
5473 int nunits = CONST_VECTOR_NUNITS (cv);
5474 bool ok = true;
5475 for (int i = 0; i < nunits; ++i)
5476 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5478 ok = false;
5479 break;
5481 if (ok)
5483 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5484 XEXP (XEXP (op0, 0), 0),
5485 XEXP (op0, 1));
5486 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5487 return retval;
5491 /* Convert x == 0 ? N : clz (x) into clz (x) when
5492 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5493 Similarly for ctz (x). */
5494 if (COMPARISON_P (op0) && !side_effects_p (op0)
5495 && XEXP (op0, 1) == const0_rtx)
5497 rtx simplified
5498 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5499 op1, op2);
5500 if (simplified)
5501 return simplified;
5504 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5506 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5507 ? GET_MODE (XEXP (op0, 1))
5508 : GET_MODE (XEXP (op0, 0)));
5509 rtx temp;
5511 /* Look for happy constants in op1 and op2. */
5512 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5514 HOST_WIDE_INT t = INTVAL (op1);
5515 HOST_WIDE_INT f = INTVAL (op2);
5517 if (t == STORE_FLAG_VALUE && f == 0)
5518 code = GET_CODE (op0);
5519 else if (t == 0 && f == STORE_FLAG_VALUE)
5521 enum rtx_code tmp;
5522 tmp = reversed_comparison_code (op0, NULL);
5523 if (tmp == UNKNOWN)
5524 break;
5525 code = tmp;
5527 else
5528 break;
5530 return simplify_gen_relational (code, mode, cmp_mode,
5531 XEXP (op0, 0), XEXP (op0, 1));
5534 if (cmp_mode == VOIDmode)
5535 cmp_mode = op0_mode;
5536 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5537 cmp_mode, XEXP (op0, 0),
5538 XEXP (op0, 1));
5540 /* See if any simplifications were possible. */
5541 if (temp)
5543 if (CONST_INT_P (temp))
5544 return temp == const0_rtx ? op2 : op1;
5545 else if (temp)
5546 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5549 break;
5551 case VEC_MERGE:
5552 gcc_assert (GET_MODE (op0) == mode);
5553 gcc_assert (GET_MODE (op1) == mode);
5554 gcc_assert (VECTOR_MODE_P (mode));
5555 trueop2 = avoid_constant_pool_reference (op2);
5556 if (CONST_INT_P (trueop2))
5558 int elt_size = GET_MODE_UNIT_SIZE (mode);
5559 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5560 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5561 unsigned HOST_WIDE_INT mask;
5562 if (n_elts == HOST_BITS_PER_WIDE_INT)
5563 mask = -1;
5564 else
5565 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5567 if (!(sel & mask) && !side_effects_p (op0))
5568 return op1;
5569 if ((sel & mask) == mask && !side_effects_p (op1))
5570 return op0;
5572 rtx trueop0 = avoid_constant_pool_reference (op0);
5573 rtx trueop1 = avoid_constant_pool_reference (op1);
5574 if (GET_CODE (trueop0) == CONST_VECTOR
5575 && GET_CODE (trueop1) == CONST_VECTOR)
5577 rtvec v = rtvec_alloc (n_elts);
5578 unsigned int i;
5580 for (i = 0; i < n_elts; i++)
5581 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5582 ? CONST_VECTOR_ELT (trueop0, i)
5583 : CONST_VECTOR_ELT (trueop1, i));
5584 return gen_rtx_CONST_VECTOR (mode, v);
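/* For example, with sel = 5 (bits 0 and 2 set, selecting from the first
   operand),
   (vec_merge:V4SI (const_vector [a0 a1 a2 a3])
                   (const_vector [b0 b1 b2 b3]) (const_int 5))
   folds to (const_vector [a0 b1 a2 b3]).  */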
5587 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5588 if no element from a appears in the result. */
5589 if (GET_CODE (op0) == VEC_MERGE)
5591 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5592 if (CONST_INT_P (tem))
5594 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5595 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5596 return simplify_gen_ternary (code, mode, mode,
5597 XEXP (op0, 1), op1, op2);
5598 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5599 return simplify_gen_ternary (code, mode, mode,
5600 XEXP (op0, 0), op1, op2);
5603 if (GET_CODE (op1) == VEC_MERGE)
5605 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5606 if (CONST_INT_P (tem))
5608 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5609 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5610 return simplify_gen_ternary (code, mode, mode,
5611 op0, XEXP (op1, 1), op2);
5612 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5613 return simplify_gen_ternary (code, mode, mode,
5614 op0, XEXP (op1, 0), op2);
5618 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5619 with a. */
5620 if (GET_CODE (op0) == VEC_DUPLICATE
5621 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5622 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5623 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5625 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5626 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5628 if (XEXP (XEXP (op0, 0), 0) == op1
5629 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5630 return op1;
5635 if (rtx_equal_p (op0, op1)
5636 && !side_effects_p (op2) && !side_effects_p (op1))
5637 return op0;
5639 break;
5641 default:
5642 gcc_unreachable ();
5645 return 0;
5648 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5649 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5650 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5652 Works by unpacking OP into a collection of 8-bit values
5653 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5654 and then repacking them again for OUTERMODE. */
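/* Editorial note -- an illustrative example, not part of the original source:
   on a little-endian target, simplifying (subreg:QI (const_int 0x1234) 0)
   with INNERMODE == HImode unpacks the constant into the byte array
   {0x34, 0x12}, picks the byte at offset 0 and repacks it in QImode, giving
   (const_int 0x34); byte offset 1 would instead give (const_int 0x12).  */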
5656 static rtx
5657 simplify_immed_subreg (machine_mode outermode, rtx op,
5658 machine_mode innermode, unsigned int byte)
5660 enum {
5661 value_bit = 8,
5662 value_mask = (1 << value_bit) - 1
5664 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5665 int value_start;
5666 int i;
5667 int elem;
5669 int num_elem;
5670 rtx * elems;
5671 int elem_bitsize;
5672 rtx result_s;
5673 rtvec result_v = NULL;
5674 enum mode_class outer_class;
5675 machine_mode outer_submode;
5676 int max_bitsize;
5678 /* Some ports misuse CCmode. */
5679 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5680 return op;
5682 /* We have no way to represent a complex constant at the rtl level. */
5683 if (COMPLEX_MODE_P (outermode))
5684 return NULL_RTX;
5686 /* We support any size mode. */
5687 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5688 GET_MODE_BITSIZE (innermode));
5690 /* Unpack the value. */
5692 if (GET_CODE (op) == CONST_VECTOR)
5694 num_elem = CONST_VECTOR_NUNITS (op);
5695 elems = &CONST_VECTOR_ELT (op, 0);
5696 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5698 else
5700 num_elem = 1;
5701 elems = &op;
5702 elem_bitsize = max_bitsize;
5704 /* If this asserts, it is too complicated; reducing value_bit may help. */
5705 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5706 /* I don't know how to handle endianness of sub-units. */
5707 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5709 for (elem = 0; elem < num_elem; elem++)
5711 unsigned char * vp;
5712 rtx el = elems[elem];
5714 /* Vectors are kept in target memory order. (This is probably
5715 a mistake.) */
5717 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5718 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5719 / BITS_PER_UNIT);
5720 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5721 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5722 unsigned bytele = (subword_byte % UNITS_PER_WORD
5723 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5724 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
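/* Editorial note -- an illustrative example, not part of the original source:
   the swizzle above turns the memory-order byte offset of each element into an
   offset into VALUE, which is always filled least-significant byte first.
   For a two-element vector of 32-bit elements with UNITS_PER_WORD == 4 on a
   target where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN hold, element 0 has
   byte == 0 and ibyte == 4, so bytele == 4 % 4 + (4 / 4) * 4 == 4 and its
   bytes occupy VALUE[4..7], while element 1 occupies VALUE[0..3].  */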
5727 switch (GET_CODE (el))
5729 case CONST_INT:
5730 for (i = 0;
5731 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5732 i += value_bit)
5733 *vp++ = INTVAL (el) >> i;
5734 /* CONST_INTs are always logically sign-extended. */
5735 for (; i < elem_bitsize; i += value_bit)
5736 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5737 break;
5739 case CONST_WIDE_INT:
5741 rtx_mode_t val = rtx_mode_t (el, innermode);
5742 unsigned char extend = wi::sign_mask (val);
5744 for (i = 0; i < elem_bitsize; i += value_bit)
5745 *vp++ = wi::extract_uhwi (val, i, value_bit);
5746 for (; i < elem_bitsize; i += value_bit)
5747 *vp++ = extend;
5749 break;
5751 case CONST_DOUBLE:
5752 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5754 unsigned char extend = 0;
5755 /* If this triggers, someone should have generated a
5756 CONST_INT instead. */
5757 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5759 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5760 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5761 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5763 *vp++
5764 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5765 i += value_bit;
5768 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5769 extend = -1;
5770 for (; i < elem_bitsize; i += value_bit)
5771 *vp++ = extend;
5773 else
5775 /* This is big enough for anything on the platform. */
5776 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5777 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5779 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5780 gcc_assert (bitsize <= elem_bitsize);
5781 gcc_assert (bitsize % value_bit == 0);
5783 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5784 GET_MODE (el));
5786 /* real_to_target produces its result in words affected by
5787 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5788 and use WORDS_BIG_ENDIAN instead; see the documentation
5789 of SUBREG in rtl.texi. */
5790 for (i = 0; i < bitsize; i += value_bit)
5792 int ibase;
5793 if (WORDS_BIG_ENDIAN)
5794 ibase = bitsize - 1 - i;
5795 else
5796 ibase = i;
5797 *vp++ = tmp[ibase / 32] >> i % 32;
5800 /* It shouldn't matter what's done here, so fill it with
5801 zero. */
5802 for (; i < elem_bitsize; i += value_bit)
5803 *vp++ = 0;
5805 break;
5807 case CONST_FIXED:
5808 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5810 for (i = 0; i < elem_bitsize; i += value_bit)
5811 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5813 else
5815 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5816 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5817 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5818 i += value_bit)
5819 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5820 >> (i - HOST_BITS_PER_WIDE_INT);
5821 for (; i < elem_bitsize; i += value_bit)
5822 *vp++ = 0;
5824 break;
5826 default:
5827 gcc_unreachable ();
5831 /* Now, pick the right byte to start with. */
5832 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5833 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5834 will already have offset 0. */
5835 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5837 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5838 - byte);
5839 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5840 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5841 byte = (subword_byte % UNITS_PER_WORD
5842 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
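/* Editorial note -- an illustrative example, not part of the original source:
   taking the SImode subreg at byte 0 of a DImode constant on a big-endian
   target with UNITS_PER_WORD == 4: ibyte == 8 - 4 - 0 == 4, so BYTE is
   renumbered to 4 % 4 + (4 / 4) * 4 == 4, i.e. the high half of the
   little-endian VALUE array -- which is what SUBREG_BYTE 0 denotes on a
   big-endian machine.  */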
5845 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5846 so if it's become negative it will instead be very large.) */
5847 gcc_assert (byte < GET_MODE_SIZE (innermode));
5849 /* Convert from bytes to chunks of size value_bit. */
5850 value_start = byte * (BITS_PER_UNIT / value_bit);
5852 /* Re-pack the value. */
5853 num_elem = GET_MODE_NUNITS (outermode);
5855 if (VECTOR_MODE_P (outermode))
5857 result_v = rtvec_alloc (num_elem);
5858 elems = &RTVEC_ELT (result_v, 0);
5860 else
5861 elems = &result_s;
5863 outer_submode = GET_MODE_INNER (outermode);
5864 outer_class = GET_MODE_CLASS (outer_submode);
5865 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5867 gcc_assert (elem_bitsize % value_bit == 0);
5868 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5870 for (elem = 0; elem < num_elem; elem++)
5872 unsigned char *vp;
5874 /* Vectors are stored in target memory order. (This is probably
5875 a mistake.) */
5877 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5878 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5879 / BITS_PER_UNIT);
5880 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5881 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5882 unsigned bytele = (subword_byte % UNITS_PER_WORD
5883 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5884 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5887 switch (outer_class)
5889 case MODE_INT:
5890 case MODE_PARTIAL_INT:
5892 int u;
5893 int base = 0;
5894 int units
5895 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5896 / HOST_BITS_PER_WIDE_INT;
5897 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5898 wide_int r;
5900 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5901 return NULL_RTX;
5902 for (u = 0; u < units; u++)
5904 unsigned HOST_WIDE_INT buf = 0;
5905 for (i = 0;
5906 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5907 i += value_bit)
5908 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5910 tmp[u] = buf;
5911 base += HOST_BITS_PER_WIDE_INT;
5913 r = wide_int::from_array (tmp, units,
5914 GET_MODE_PRECISION (outer_submode));
5915 #if TARGET_SUPPORTS_WIDE_INT == 0
5916 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5917 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5918 return NULL_RTX;
5919 #endif
5920 elems[elem] = immed_wide_int_const (r, outer_submode);
5922 break;
5924 case MODE_FLOAT:
5925 case MODE_DECIMAL_FLOAT:
5927 REAL_VALUE_TYPE r;
5928 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5930 /* real_from_target wants its input in words affected by
5931 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5932 and use WORDS_BIG_ENDIAN instead; see the documentation
5933 of SUBREG in rtl.texi. */
5934 for (i = 0; i < elem_bitsize; i += value_bit)
5936 int ibase;
5937 if (WORDS_BIG_ENDIAN)
5938 ibase = elem_bitsize - 1 - i;
5939 else
5940 ibase = i;
5941 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5944 real_from_target (&r, tmp, outer_submode);
5945 elems[elem] = const_double_from_real_value (r, outer_submode);
5947 break;
5949 case MODE_FRACT:
5950 case MODE_UFRACT:
5951 case MODE_ACCUM:
5952 case MODE_UACCUM:
5954 FIXED_VALUE_TYPE f;
5955 f.data.low = 0;
5956 f.data.high = 0;
5957 f.mode = outer_submode;
5959 for (i = 0;
5960 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5961 i += value_bit)
5962 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5963 for (; i < elem_bitsize; i += value_bit)
5964 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5965 << (i - HOST_BITS_PER_WIDE_INT));
5967 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5969 break;
5971 default:
5972 gcc_unreachable ();
5975 if (VECTOR_MODE_P (outermode))
5976 return gen_rtx_CONST_VECTOR (outermode, result_v);
5977 else
5978 return result_s;
5981 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5982 Return 0 if no simplifications are possible. */
5983 rtx
5984 simplify_subreg (machine_mode outermode, rtx op,
5985 machine_mode innermode, unsigned int byte)
5987 /* Little bit of sanity checking. */
5988 gcc_assert (innermode != VOIDmode);
5989 gcc_assert (outermode != VOIDmode);
5990 gcc_assert (innermode != BLKmode);
5991 gcc_assert (outermode != BLKmode);
5993 gcc_assert (GET_MODE (op) == innermode
5994 || GET_MODE (op) == VOIDmode);
5996 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5997 return NULL_RTX;
5999 if (byte >= GET_MODE_SIZE (innermode))
6000 return NULL_RTX;
6002 if (outermode == innermode && !byte)
6003 return op;
6005 if (CONST_SCALAR_INT_P (op)
6006 || CONST_DOUBLE_AS_FLOAT_P (op)
6007 || GET_CODE (op) == CONST_FIXED
6008 || GET_CODE (op) == CONST_VECTOR)
6009 return simplify_immed_subreg (outermode, op, innermode, byte);
6011 /* Changing mode twice with SUBREG => just change it once,
6012 or not at all if changing back to op's starting mode. */
6013 if (GET_CODE (op) == SUBREG)
6015 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6016 int final_offset = byte + SUBREG_BYTE (op);
6017 rtx newx;
6019 if (outermode == innermostmode
6020 && byte == 0 && SUBREG_BYTE (op) == 0)
6021 return SUBREG_REG (op);
6023 /* The SUBREG_BYTE represents the offset, as if the value were stored
6024 in memory. An irritating exception is a paradoxical subreg, where
6025 we define SUBREG_BYTE to be 0; on big-endian machines, the true
6026 value would be negative. For a moment, undo this exception. */
6027 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6029 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6030 if (WORDS_BIG_ENDIAN)
6031 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6032 if (BYTES_BIG_ENDIAN)
6033 final_offset += difference % UNITS_PER_WORD;
6035 if (SUBREG_BYTE (op) == 0
6036 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6038 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6039 if (WORDS_BIG_ENDIAN)
6040 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6041 if (BYTES_BIG_ENDIAN)
6042 final_offset += difference % UNITS_PER_WORD;
6045 /* See whether resulting subreg will be paradoxical. */
6046 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
6048 /* In nonparadoxical subregs we can't handle negative offsets. */
6049 if (final_offset < 0)
6050 return NULL_RTX;
6051 /* Bail out in case resulting subreg would be incorrect. */
6052 if (final_offset % GET_MODE_SIZE (outermode)
6053 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6054 return NULL_RTX;
6056 else
6058 int offset = 0;
6059 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6061 /* In a paradoxical subreg, see if we are still looking at the lower
6062 part. If so, our SUBREG_BYTE will be 0. */
6063 if (WORDS_BIG_ENDIAN)
6064 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6065 if (BYTES_BIG_ENDIAN)
6066 offset += difference % UNITS_PER_WORD;
6067 if (offset == final_offset)
6068 final_offset = 0;
6069 else
6070 return NULL_RTX;
6073 /* Recurse for further possible simplifications. */
6074 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6075 final_offset);
6076 if (newx)
6077 return newx;
6078 if (validate_subreg (outermode, innermostmode,
6079 SUBREG_REG (op), final_offset))
6081 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6082 if (SUBREG_PROMOTED_VAR_P (op)
6083 && SUBREG_PROMOTED_SIGN (op) >= 0
6084 && GET_MODE_CLASS (outermode) == MODE_INT
6085 && IN_RANGE (GET_MODE_SIZE (outermode),
6086 GET_MODE_SIZE (innermode),
6087 GET_MODE_SIZE (innermostmode))
6088 && subreg_lowpart_p (newx))
6090 SUBREG_PROMOTED_VAR_P (newx) = 1;
6091 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6093 return newx;
6095 return NULL_RTX;
6098 /* SUBREG of a hard register => just change the register number
6099 and/or mode. If the hard register is not valid in that mode,
6100 suppress this simplification. If the hard register is the stack,
6101 frame, or argument pointer, leave this as a SUBREG. */
6103 if (REG_P (op) && HARD_REGISTER_P (op))
6105 unsigned int regno, final_regno;
6107 regno = REGNO (op);
6108 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6109 if (HARD_REGISTER_NUM_P (final_regno))
6111 rtx x;
6112 int final_offset = byte;
6114 /* Adjust offset for paradoxical subregs. */
6115 if (byte == 0
6116 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6118 int difference = (GET_MODE_SIZE (innermode)
6119 - GET_MODE_SIZE (outermode));
6120 if (WORDS_BIG_ENDIAN)
6121 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6122 if (BYTES_BIG_ENDIAN)
6123 final_offset += difference % UNITS_PER_WORD;
6126 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6128 /* Propagate the original regno. We don't have any way to specify
6129 the offset inside the original regno, so do so only for the lowpart.
6130 The information is used only by alias analysis, which cannot
6131 grok a partial register anyway. */
6133 if (subreg_lowpart_offset (outermode, innermode) == byte)
6134 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6135 return x;
6139 /* If we have a SUBREG of a register that we are replacing and we are
6140 replacing it with a MEM, make a new MEM and try replacing the
6141 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6142 or if we would be widening it. */
6144 if (MEM_P (op)
6145 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6146 /* Allow splitting of volatile memory references in case we don't
6147 have an instruction to move the whole thing. */
6148 && (! MEM_VOLATILE_P (op)
6149 || ! have_insn_for (SET, innermode))
6150 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6151 return adjust_address_nv (op, outermode, byte);
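/* Editorial note -- an illustrative example, not part of the original source:
   (subreg:SI (mem:DI ADDR) 4) is rewritten by adjust_address_nv into an
   SImode memory reference 4 bytes past ADDR -- roughly
   (mem:SI (plus ADDR (const_int 4))) -- so the narrower load replaces the
   SUBREG of the wider one.  */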
6153 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6154 of two parts. */
6155 if (GET_CODE (op) == CONCAT
6156 || GET_CODE (op) == VEC_CONCAT)
6158 unsigned int part_size, final_offset;
6159 rtx part, res;
6161 enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
6162 if (part_mode == VOIDmode)
6163 part_mode = GET_MODE_INNER (GET_MODE (op));
6164 part_size = GET_MODE_SIZE (part_mode);
6165 if (byte < part_size)
6167 part = XEXP (op, 0);
6168 final_offset = byte;
6170 else
6172 part = XEXP (op, 1);
6173 final_offset = byte - part_size;
6176 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6177 return NULL_RTX;
6179 part_mode = GET_MODE (part);
6180 if (part_mode == VOIDmode)
6181 part_mode = GET_MODE_INNER (GET_MODE (op));
6182 res = simplify_subreg (outermode, part, part_mode, final_offset);
6183 if (res)
6184 return res;
6185 if (validate_subreg (outermode, part_mode, part, final_offset))
6186 return gen_rtx_SUBREG (outermode, part, final_offset);
6187 return NULL_RTX;
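/* Editorial note -- an illustrative example, not part of the original source:
   for (subreg:SF (concat:SC (reg:SF a) (reg:SF b)) 4), part_size == 4 and
   BYTE == 4, so PART is the second operand and FINAL_OFFSET is 0; the
   recursive call then returns (reg:SF b), the imaginary part of the complex
   value.  */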
6190 /* A SUBREG resulting from a zero extension may fold to zero if
6191 it extracts higher bits than the ZERO_EXTEND's source provides. */
6192 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6194 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6195 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6196 return CONST0_RTX (outermode);
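/* Editorial note -- an illustrative example, not part of the original source:
   on a little-endian target, (subreg:SI (zero_extend:DI (reg:QI x)) 4) reads
   bits 32..63 of the zero-extended value; since the QImode source only
   supplies bits 0..7, every extracted bit is known to be zero and the subreg
   folds to (const_int 0).  */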
6199 if (SCALAR_INT_MODE_P (outermode)
6200 && SCALAR_INT_MODE_P (innermode)
6201 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6202 && byte == subreg_lowpart_offset (outermode, innermode))
6204 rtx tem = simplify_truncation (outermode, op, innermode);
6205 if (tem)
6206 return tem;
6209 return NULL_RTX;
6212 /* Make a SUBREG operation or equivalent if it folds. */
6214 rtx
6215 simplify_gen_subreg (machine_mode outermode, rtx op,
6216 machine_mode innermode, unsigned int byte)
6218 rtx newx;
6220 newx = simplify_subreg (outermode, op, innermode, byte);
6221 if (newx)
6222 return newx;
6224 if (GET_CODE (op) == SUBREG
6225 || GET_CODE (op) == CONCAT
6226 || GET_MODE (op) == VOIDmode)
6227 return NULL_RTX;
6229 if (validate_subreg (outermode, innermode, op, byte))
6230 return gen_rtx_SUBREG (outermode, op, byte);
6232 return NULL_RTX;
6235 /* Generate a subreg to get the least significant part of EXPR (in mode
6236 INNER_MODE) in OUTER_MODE. */
6238 rtx
6239 lowpart_subreg (machine_mode outer_mode, rtx expr,
6240 machine_mode inner_mode)
6242 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6243 subreg_lowpart_offset (outer_mode, inner_mode));
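/* Editorial note -- an illustrative example, not part of the original source:
   lowpart_subreg (QImode, x, SImode) asks for the least significant byte of
   X: subreg_lowpart_offset yields byte 0 on a little-endian target and byte 3
   on a big-endian one, and the call either folds (e.g. when X is a constant)
   or produces the corresponding (subreg:QI x ...).  */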
6246 /* Simplify X, an rtx expression.
6248 Return the simplified expression or NULL if no simplifications
6249 were possible.
6251 This is the preferred entry point into the simplification routines;
6252 however, we still allow passes to call the more specific routines.
6254 Right now GCC has three (yes, three) major bodies of RTL simplification
6255 code that need to be unified.
6257 1. fold_rtx in cse.c. This code uses various CSE specific
6258 information to aid in RTL simplification.
6260 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6261 it uses combine specific information to aid in RTL
6262 simplification.
6264 3. The routines in this file.
6267 Long term we want to have only one body of simplification code; to
6268 get to that state I recommend the following steps:
6270 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6271 that do not depend on pass-specific state into these routines.
6273 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6274 use this routine whenever possible.
6276 3. Allow for pass dependent state to be provided to these
6277 routines and add simplifications based on the pass dependent
6278 state. Remove code from cse.c & combine.c that becomes
6279 redundant/dead.
6281 It will take time, but ultimately the compiler will be easier to
6282 maintain and improve. It's totally silly that when we add a
6283 simplification it needs to be added to 4 places (3 for RTL
6284 simplification and 1 for tree simplification). */
6286 rtx
6287 simplify_rtx (const_rtx x)
6289 const enum rtx_code code = GET_CODE (x);
6290 const machine_mode mode = GET_MODE (x);
6292 switch (GET_RTX_CLASS (code))
6294 case RTX_UNARY:
6295 return simplify_unary_operation (code, mode,
6296 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6297 case RTX_COMM_ARITH:
6298 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6299 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6301 /* Fall through. */
6303 case RTX_BIN_ARITH:
6304 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6306 case RTX_TERNARY:
6307 case RTX_BITFIELD_OPS:
6308 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6309 XEXP (x, 0), XEXP (x, 1),
6310 XEXP (x, 2));
6312 case RTX_COMPARE:
6313 case RTX_COMM_COMPARE:
6314 return simplify_relational_operation (code, mode,
6315 ((GET_MODE (XEXP (x, 0))
6316 != VOIDmode)
6317 ? GET_MODE (XEXP (x, 0))
6318 : GET_MODE (XEXP (x, 1))),
6319 XEXP (x, 0),
6320 XEXP (x, 1));
6322 case RTX_EXTRA:
6323 if (code == SUBREG)
6324 return simplify_subreg (mode, SUBREG_REG (x),
6325 GET_MODE (SUBREG_REG (x)),
6326 SUBREG_BYTE (x));
6327 break;
6329 case RTX_OBJ:
6330 if (code == LO_SUM)
6332 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6333 if (GET_CODE (XEXP (x, 0)) == HIGH
6334 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6335 return XEXP (x, 1);
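/* Editorial note -- an illustrative example, not part of the original source:
   (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo")) recombines the high
   and low parts of the same address, so it simplifies here to
   (symbol_ref "foo").  */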
6337 break;
6339 default:
6340 break;
6342 return NULL;