1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
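/* In other words, HWI_SIGN_EXTEND of a value whose host sign bit is set
   yields (HOST_WIDE_INT) -1 (all ones), and otherwise 0; it broadcasts
   the sign of LOW into the notional high word.  */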
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
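/* For example, negating (const_int -2147483648) in SImode wraps around:
   the truncated result is 0x80000000 again, which gen_int_mode
   canonicalizes back to (const_int -2147483648).  */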
78 /* Test whether expression, X, is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
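/* For example, in SImode only (const_int -2147483648) satisfies this:
   its low 32 bits are 0x80000000, i.e. just bit 31 set.  */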
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 unsigned int width;
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 unsigned int width;
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 unsigned int width;
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
227 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
228 GET_MODE (x));
229 return x;
231 default:
232 return x;
235 if (GET_MODE (x) == BLKmode)
236 return x;
238 addr = XEXP (x, 0);
240 /* Call target hook to avoid the effects of -fpic etc.... */
241 addr = targetm.delegitimize_address (addr);
243 /* Split the address into a base and integer offset. */
244 if (GET_CODE (addr) == CONST
245 && GET_CODE (XEXP (addr, 0)) == PLUS
246 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
248 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
249 addr = XEXP (XEXP (addr, 0), 0);
252 if (GET_CODE (addr) == LO_SUM)
253 addr = XEXP (addr, 1);
255 /* If this is a constant pool reference, we can turn it into its
256 constant and hope that simplifications happen. */
257 if (GET_CODE (addr) == SYMBOL_REF
258 && CONSTANT_POOL_ADDRESS_P (addr))
260 c = get_pool_constant (addr);
261 cmode = get_pool_mode (addr);
263 /* If we're accessing the constant in a different mode than it was
264 originally stored, attempt to fix that up via subreg simplifications.
265 If that fails we have no choice but to return the original memory. */
266 if ((offset != 0 || cmode != GET_MODE (x))
267 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
269 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
270 if (tem && CONSTANT_P (tem))
271 return tem;
273 else
274 return c;
277 return x;
280 /* Simplify a MEM based on its attributes. This is the default
281 delegitimize_address target hook, and it's recommended that every
282 overrider call it. */
285 delegitimize_mem_from_attrs (rtx x)
287 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
288 use their base addresses as equivalent. */
289 if (MEM_P (x)
290 && MEM_EXPR (x)
291 && MEM_OFFSET_KNOWN_P (x))
293 tree decl = MEM_EXPR (x);
294 machine_mode mode = GET_MODE (x);
295 HOST_WIDE_INT offset = 0;
297 switch (TREE_CODE (decl))
299 default:
300 decl = NULL;
301 break;
303 case VAR_DECL:
304 break;
306 case ARRAY_REF:
307 case ARRAY_RANGE_REF:
308 case COMPONENT_REF:
309 case BIT_FIELD_REF:
310 case REALPART_EXPR:
311 case IMAGPART_EXPR:
312 case VIEW_CONVERT_EXPR:
314 HOST_WIDE_INT bitsize, bitpos;
315 tree toffset;
316 int unsignedp, volatilep = 0;
318 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
319 &mode, &unsignedp, &volatilep, false);
320 if (bitsize != GET_MODE_BITSIZE (mode)
321 || (bitpos % BITS_PER_UNIT)
322 || (toffset && !tree_fits_shwi_p (toffset)))
323 decl = NULL;
324 else
326 offset += bitpos / BITS_PER_UNIT;
327 if (toffset)
328 offset += tree_to_shwi (toffset);
330 break;
334 if (decl
335 && mode == GET_MODE (x)
336 && TREE_CODE (decl) == VAR_DECL
337 && (TREE_STATIC (decl)
338 || DECL_THREAD_LOCAL_P (decl))
339 && DECL_RTL_SET_P (decl)
340 && MEM_P (DECL_RTL (decl)))
342 rtx newx;
344 offset += MEM_OFFSET (x);
346 newx = DECL_RTL (decl);
348 if (MEM_P (newx))
350 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
352 /* Avoid creating a new MEM needlessly if we already had
353 the same address. We do if there's no OFFSET and the
354 old address X is identical to NEWX, or if X is of the
355 form (plus NEWX OFFSET), or the NEWX is of the form
356 (plus Y (const_int Z)) and X is that with the offset
357 added: (plus Y (const_int Z+OFFSET)). */
358 if (!((offset == 0
359 || (GET_CODE (o) == PLUS
360 && GET_CODE (XEXP (o, 1)) == CONST_INT
361 && (offset == INTVAL (XEXP (o, 1))
362 || (GET_CODE (n) == PLUS
363 && GET_CODE (XEXP (n, 1)) == CONST_INT
364 && (INTVAL (XEXP (n, 1)) + offset
365 == INTVAL (XEXP (o, 1)))
366 && (n = XEXP (n, 0))))
367 && (o = XEXP (o, 0))))
368 && rtx_equal_p (o, n)))
369 x = adjust_address_nv (newx, mode, offset);
371 else if (GET_MODE (x) == GET_MODE (newx)
372 && offset == 0)
373 x = newx;
377 return x;
380 /* Make a unary operation by first seeing if it folds and otherwise making
381 the specified operation. */
384 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
385 machine_mode op_mode)
387 rtx tem;
389 /* If this simplifies, use it. */
390 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
391 return tem;
393 return gen_rtx_fmt_e (code, mode, op);
396 /* Likewise for ternary operations. */
399 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
400 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
402 rtx tem;
404 /* If this simplifies, use it. */
405 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
406 op0, op1, op2)))
407 return tem;
409 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
412 /* Likewise, for relational operations.
413 CMP_MODE specifies mode comparison is done in. */
416 simplify_gen_relational (enum rtx_code code, machine_mode mode,
417 machine_mode cmp_mode, rtx op0, rtx op1)
419 rtx tem;
421 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
422 op0, op1)))
423 return tem;
425 return gen_rtx_fmt_ee (code, mode, op0, op1);
428 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
429 and simplify the result. If FN is non-NULL, call this callback on each
430    X; if it returns non-NULL, replace X with its return value and simplify the
431 result. */
434 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
435 rtx (*fn) (rtx, const_rtx, void *), void *data)
437 enum rtx_code code = GET_CODE (x);
438 machine_mode mode = GET_MODE (x);
439 machine_mode op_mode;
440 const char *fmt;
441 rtx op0, op1, op2, newx, op;
442 rtvec vec, newvec;
443 int i, j;
445 if (__builtin_expect (fn != NULL, 0))
447 newx = fn (x, old_rtx, data);
448 if (newx)
449 return newx;
451 else if (rtx_equal_p (x, old_rtx))
452 return copy_rtx ((rtx) data);
454 switch (GET_RTX_CLASS (code))
456 case RTX_UNARY:
457 op0 = XEXP (x, 0);
458 op_mode = GET_MODE (op0);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 if (op0 == XEXP (x, 0))
461 return x;
462 return simplify_gen_unary (code, mode, op0, op_mode);
464 case RTX_BIN_ARITH:
465 case RTX_COMM_ARITH:
466 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_binary (code, mode, op0, op1);
472 case RTX_COMPARE:
473 case RTX_COMM_COMPARE:
474 op0 = XEXP (x, 0);
475 op1 = XEXP (x, 1);
476 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
477 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
478 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
480 return x;
481 return simplify_gen_relational (code, mode, op_mode, op0, op1);
483 case RTX_TERNARY:
484 case RTX_BITFIELD_OPS:
485 op0 = XEXP (x, 0);
486 op_mode = GET_MODE (op0);
487 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
488 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
489 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
490 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
491 return x;
492 if (op_mode == VOIDmode)
493 op_mode = GET_MODE (op0);
494 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
496 case RTX_EXTRA:
497 if (code == SUBREG)
499 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
500 if (op0 == SUBREG_REG (x))
501 return x;
502 op0 = simplify_gen_subreg (GET_MODE (x), op0,
503 GET_MODE (SUBREG_REG (x)),
504 SUBREG_BYTE (x));
505 return op0 ? op0 : x;
507 break;
509 case RTX_OBJ:
510 if (code == MEM)
512 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
513 if (op0 == XEXP (x, 0))
514 return x;
515 return replace_equiv_address_nv (x, op0);
517 else if (code == LO_SUM)
519 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
520 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
522 /* (lo_sum (high x) y) -> y where x and y have the same base. */
523 if (GET_CODE (op0) == HIGH)
525 rtx base0, base1, offset0, offset1;
526 split_const (XEXP (op0, 0), &base0, &offset0);
527 split_const (op1, &base1, &offset1);
528 if (rtx_equal_p (base0, base1))
529 return op1;
532 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
533 return x;
534 return gen_rtx_LO_SUM (mode, op0, op1);
536 break;
538 default:
539 break;
542 newx = x;
543 fmt = GET_RTX_FORMAT (code);
544 for (i = 0; fmt[i]; i++)
545 switch (fmt[i])
547 case 'E':
548 vec = XVEC (x, i);
549 newvec = XVEC (newx, i);
550 for (j = 0; j < GET_NUM_ELEM (vec); j++)
552 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
553 old_rtx, fn, data);
554 if (op != RTVEC_ELT (vec, j))
556 if (newvec == vec)
558 newvec = shallow_copy_rtvec (vec);
559 if (x == newx)
560 newx = shallow_copy_rtx (x);
561 XVEC (newx, i) = newvec;
563 RTVEC_ELT (newvec, j) = op;
566 break;
568 case 'e':
569 if (XEXP (x, i))
571 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
572 if (op != XEXP (x, i))
574 if (x == newx)
575 newx = shallow_copy_rtx (x);
576 XEXP (newx, i) = op;
579 break;
581 return newx;
584 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
585 resulting RTX. Return a new RTX which is as simplified as possible. */
588 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
590 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
593 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
594 Only handle cases where the truncated value is inherently an rvalue.
596 RTL provides two ways of truncating a value:
598 1. a lowpart subreg. This form is only a truncation when both
599 the outer and inner modes (here MODE and OP_MODE respectively)
600 are scalar integers, and only then when the subreg is used as
601 an rvalue.
603 It is only valid to form such truncating subregs if the
604 truncation requires no action by the target. The onus for
605 proving this is on the creator of the subreg -- e.g. the
606 caller to simplify_subreg or simplify_gen_subreg -- and typically
607 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
609 2. a TRUNCATE. This form handles both scalar and compound integers.
611 The first form is preferred where valid. However, the TRUNCATE
612 handling in simplify_unary_operation turns the second form into the
613 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
614 so it is generally safe to form rvalue truncations using:
616 simplify_gen_unary (TRUNCATE, ...)
618 and leave simplify_unary_operation to work out which representation
619 should be used.
621 Because of the proof requirements on (1), simplify_truncation must
622 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
623 regardless of whether the outer truncation came from a SUBREG or a
624 TRUNCATE. For example, if the caller has proven that an SImode
625 truncation of:
627 (and:DI X Y)
629 is a no-op and can be represented as a subreg, it does not follow
630 that SImode truncations of X and Y are also no-ops. On a target
631 like 64-bit MIPS that requires SImode values to be stored in
632 sign-extended form, an SImode truncation of:
634 (and:DI (reg:DI X) (const_int 63))
636 is trivially a no-op because only the lower 6 bits can be set.
637 However, X is still an arbitrary 64-bit number and so we cannot
638 assume that truncating it too is a no-op. */
640 static rtx
641 simplify_truncation (machine_mode mode, rtx op,
642 machine_mode op_mode)
644 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
645 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717      (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Recognize a word extraction from a multi-word subreg. */
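/* For example, on a little-endian target with 32-bit words,
   (truncate:SI (lshiftrt:DI X (const_int 32))) becomes the SImode
   subreg of X at byte offset 4, i.e. the high word of X.  */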
729 if ((GET_CODE (op) == LSHIFTRT
730 || GET_CODE (op) == ASHIFTRT)
731 && SCALAR_INT_MODE_P (mode)
732 && SCALAR_INT_MODE_P (op_mode)
733 && precision >= BITS_PER_WORD
734 && 2 * precision <= op_precision
735 && CONST_INT_P (XEXP (op, 1))
736 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
737 && UINTVAL (XEXP (op, 1)) < op_precision)
739 int byte = subreg_lowpart_offset (mode, op_mode);
740 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
741 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
742 (WORDS_BIG_ENDIAN
743 ? byte - shifted_bytes
744 : byte + shifted_bytes));
747 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
748 and try replacing the TRUNCATE and shift with it. Don't do this
749 if the MEM has a mode-dependent address. */
750 if ((GET_CODE (op) == LSHIFTRT
751 || GET_CODE (op) == ASHIFTRT)
752 && SCALAR_INT_MODE_P (op_mode)
753 && MEM_P (XEXP (op, 0))
754 && CONST_INT_P (XEXP (op, 1))
755 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
756 && INTVAL (XEXP (op, 1)) > 0
757 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
758 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
759 MEM_ADDR_SPACE (XEXP (op, 0)))
760 && ! MEM_VOLATILE_P (XEXP (op, 0))
761 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
762 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
764 int byte = subreg_lowpart_offset (mode, op_mode);
765 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
766 return adjust_address_nv (XEXP (op, 0), mode,
767 (WORDS_BIG_ENDIAN
768 ? byte - shifted_bytes
769 : byte + shifted_bytes));
772 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
773 (OP:SI foo:SI) if OP is NEG or ABS. */
774 if ((GET_CODE (op) == ABS
775 || GET_CODE (op) == NEG)
776 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
777 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
778 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
779 return simplify_gen_unary (GET_CODE (op), mode,
780 XEXP (XEXP (op, 0), 0), mode);
782 /* (truncate:A (subreg:B (truncate:C X) 0)) is
783 (truncate:A X). */
784 if (GET_CODE (op) == SUBREG
785 && SCALAR_INT_MODE_P (mode)
786 && SCALAR_INT_MODE_P (op_mode)
787 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
788 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
789 && subreg_lowpart_p (op))
791 rtx inner = XEXP (SUBREG_REG (op), 0);
792 if (GET_MODE_PRECISION (mode)
793 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
794 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
795 else
796 /* If subreg above is paradoxical and C is narrower
797 than A, return (subreg:A (truncate:C X) 0). */
798 return simplify_gen_subreg (mode, SUBREG_REG (op),
799 GET_MODE (SUBREG_REG (op)), 0);
802 /* (truncate:A (truncate:B X)) is (truncate:A X). */
803 if (GET_CODE (op) == TRUNCATE)
804 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
805 GET_MODE (XEXP (op, 0)));
807 return NULL_RTX;
810 /* Try to simplify a unary operation CODE whose output mode is to be
811 MODE with input operand OP whose mode was originally OP_MODE.
812 Return zero if no simplification can be made. */
814 simplify_unary_operation (enum rtx_code code, machine_mode mode,
815 rtx op, machine_mode op_mode)
817 rtx trueop, tem;
819 trueop = avoid_constant_pool_reference (op);
821 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
822 if (tem)
823 return tem;
825 return simplify_unary_operation_1 (code, mode, op);
828 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
829 to be exact. */
831 static bool
832 exact_int_to_float_conversion_p (const_rtx op)
834 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
835 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
836 /* Constants shouldn't reach here. */
837 gcc_assert (op0_mode != VOIDmode);
838 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
839 int in_bits = in_prec;
840 if (HWI_COMPUTABLE_MODE_P (op0_mode))
842 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
843 if (GET_CODE (op) == FLOAT)
844 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
845 else if (GET_CODE (op) == UNSIGNED_FLOAT)
846 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
847 else
848 gcc_unreachable ();
849 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
851 return in_bits <= out_bits;
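/* For example, (float:DF (reg:SI x)) is always exact, since any 32-bit
   integer fits in DFmode's 53-bit significand, whereas (float:SF (reg:SI x))
   is exact only if x is known to need at most 24 significant bits.  */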
854 /* Perform some simplifications we can do even if the operands
855 aren't constant. */
856 static rtx
857 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
859 enum rtx_code reversed;
860 rtx temp;
862 switch (code)
864 case NOT:
865 /* (not (not X)) == X. */
866 if (GET_CODE (op) == NOT)
867 return XEXP (op, 0);
869 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
870 comparison is all ones. */
871 if (COMPARISON_P (op)
872 && (mode == BImode || STORE_FLAG_VALUE == -1)
873 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
874 return simplify_gen_relational (reversed, mode, VOIDmode,
875 XEXP (op, 0), XEXP (op, 1));
877 /* (not (plus X -1)) can become (neg X). */
878 if (GET_CODE (op) == PLUS
879 && XEXP (op, 1) == constm1_rtx)
880 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
882 /* Similarly, (not (neg X)) is (plus X -1). */
883 if (GET_CODE (op) == NEG)
884 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
885 CONSTM1_RTX (mode));
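/* Both transformations follow from the two's complement identity
   ~Y == -Y - 1: ~(X - 1) == -X and ~(-X) == X - 1.  */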
887 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
888 if (GET_CODE (op) == XOR
889 && CONST_INT_P (XEXP (op, 1))
890 && (temp = simplify_unary_operation (NOT, mode,
891 XEXP (op, 1), mode)) != 0)
892 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
894 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
895 if (GET_CODE (op) == PLUS
896 && CONST_INT_P (XEXP (op, 1))
897 && mode_signbit_p (mode, XEXP (op, 1))
898 && (temp = simplify_unary_operation (NOT, mode,
899 XEXP (op, 1), mode)) != 0)
900 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
903 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
904 operands other than 1, but that is not valid. We could do a
905 similar simplification for (not (lshiftrt C X)) where C is
906 just the sign bit, but this doesn't seem common enough to
907 bother with. */
908 if (GET_CODE (op) == ASHIFT
909 && XEXP (op, 0) == const1_rtx)
911 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
912 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
915 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
916 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
917 so we can perform the above simplification. */
918 if (STORE_FLAG_VALUE == -1
919 && GET_CODE (op) == ASHIFTRT
920 && CONST_INT_P (XEXP (op, 1))
921 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
922 return simplify_gen_relational (GE, mode, VOIDmode,
923 XEXP (op, 0), const0_rtx);
926 if (GET_CODE (op) == SUBREG
927 && subreg_lowpart_p (op)
928 && (GET_MODE_SIZE (GET_MODE (op))
929 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
930 && GET_CODE (SUBREG_REG (op)) == ASHIFT
931 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
933 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
934 rtx x;
936 x = gen_rtx_ROTATE (inner_mode,
937 simplify_gen_unary (NOT, inner_mode, const1_rtx,
938 inner_mode),
939 XEXP (SUBREG_REG (op), 1));
940 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
941 if (temp)
942 return temp;
945 /* Apply De Morgan's laws to reduce number of patterns for machines
946 with negating logical insns (and-not, nand, etc.). If result has
947 only one NOT, put it first, since that is how the patterns are
948 coded. */
949 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
951 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
952 machine_mode op_mode;
954 op_mode = GET_MODE (in1);
955 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
957 op_mode = GET_MODE (in2);
958 if (op_mode == VOIDmode)
959 op_mode = mode;
960 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
962 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
963 std::swap (in1, in2);
965 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
966 mode, in1, in2);
969 /* (not (bswap x)) -> (bswap (not x)). */
970 if (GET_CODE (op) == BSWAP)
972 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
973 return simplify_gen_unary (BSWAP, mode, x, mode);
975 break;
977 case NEG:
978 /* (neg (neg X)) == X. */
979 if (GET_CODE (op) == NEG)
980 return XEXP (op, 0);
982 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
983 If comparison is not reversible use
984 x ? y : (neg y). */
985 if (GET_CODE (op) == IF_THEN_ELSE)
987 rtx cond = XEXP (op, 0);
988 rtx true_rtx = XEXP (op, 1);
989 rtx false_rtx = XEXP (op, 2);
991 if ((GET_CODE (true_rtx) == NEG
992 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
993 || (GET_CODE (false_rtx) == NEG
994 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
996 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
997 temp = reversed_comparison (cond, mode);
998 else
1000 temp = cond;
1001 std::swap (true_rtx, false_rtx);
1003 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1004 mode, temp, true_rtx, false_rtx);
1008 /* (neg (plus X 1)) can become (not X). */
1009 if (GET_CODE (op) == PLUS
1010 && XEXP (op, 1) == const1_rtx)
1011 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1013 /* Similarly, (neg (not X)) is (plus X 1). */
1014 if (GET_CODE (op) == NOT)
1015 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1016 CONST1_RTX (mode));
1018 /* (neg (minus X Y)) can become (minus Y X). This transformation
1019 isn't safe for modes with signed zeros, since if X and Y are
1020 both +0, (minus Y X) is the same as (minus X Y). If the
1021 rounding mode is towards +infinity (or -infinity) then the two
1022 expressions will be rounded differently. */
1023 if (GET_CODE (op) == MINUS
1024 && !HONOR_SIGNED_ZEROS (mode)
1025 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1026 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1028 if (GET_CODE (op) == PLUS
1029 && !HONOR_SIGNED_ZEROS (mode)
1030 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1032 /* (neg (plus A C)) is simplified to (minus -C A). */
1033 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1034 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1036 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1037 if (temp)
1038 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1041 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1042 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1043 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1046 /* (neg (mult A B)) becomes (mult A (neg B)).
1047 This works even for floating-point values. */
1048 if (GET_CODE (op) == MULT
1049 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1051 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1052 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1055 /* NEG commutes with ASHIFT since it is multiplication. Only do
1056 this if we can then eliminate the NEG (e.g., if the operand
1057 is a constant). */
1058 if (GET_CODE (op) == ASHIFT)
1060 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1061 if (temp)
1062 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1065 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1066 C is equal to the width of MODE minus 1. */
1067 if (GET_CODE (op) == ASHIFTRT
1068 && CONST_INT_P (XEXP (op, 1))
1069 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1070 return simplify_gen_binary (LSHIFTRT, mode,
1071 XEXP (op, 0), XEXP (op, 1));
1073 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1074 C is equal to the width of MODE minus 1. */
1075 if (GET_CODE (op) == LSHIFTRT
1076 && CONST_INT_P (XEXP (op, 1))
1077 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1078 return simplify_gen_binary (ASHIFTRT, mode,
1079 XEXP (op, 0), XEXP (op, 1));
1081 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1082 if (GET_CODE (op) == XOR
1083 && XEXP (op, 1) == const1_rtx
1084 && nonzero_bits (XEXP (op, 0), mode) == 1)
1085 return plus_constant (mode, XEXP (op, 0), -1);
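/* When A is 0 or 1, (xor A 1) is 1 - A, so its negation is A - 1.  */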
1087 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1088 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
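/* (lt x 0) tests the sign bit of X.  Negating a 0/1 flag gives 0/-1,
   which is X arithmetically shifted right by its precision minus 1;
   negating a 0/-1 flag gives 0/1, which is the logical shift.  */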
1089 if (GET_CODE (op) == LT
1090 && XEXP (op, 1) == const0_rtx
1091 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1093 machine_mode inner = GET_MODE (XEXP (op, 0));
1094 int isize = GET_MODE_PRECISION (inner);
1095 if (STORE_FLAG_VALUE == 1)
1097 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1098 GEN_INT (isize - 1));
1099 if (mode == inner)
1100 return temp;
1101 if (GET_MODE_PRECISION (mode) > isize)
1102 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1103 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1105 else if (STORE_FLAG_VALUE == -1)
1107 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1108 GEN_INT (isize - 1));
1109 if (mode == inner)
1110 return temp;
1111 if (GET_MODE_PRECISION (mode) > isize)
1112 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1113 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1116 break;
1118 case TRUNCATE:
1119 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1120 with the umulXi3_highpart patterns. */
1121 if (GET_CODE (op) == LSHIFTRT
1122 && GET_CODE (XEXP (op, 0)) == MULT)
1123 break;
1125 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1127 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1133 /* We can't handle truncation to a partial integer mode here
1134 because we don't know the real bitsize of the partial
1135 integer mode. */
1136 break;
1139 if (GET_MODE (op) != VOIDmode)
1141 temp = simplify_truncation (mode, op, GET_MODE (op));
1142 if (temp)
1143 return temp;
1146 /* If we know that the value is already truncated, we can
1147 replace the TRUNCATE with a SUBREG. */
1148 if (GET_MODE_NUNITS (mode) == 1
1149 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1150 || truncated_to_mode (mode, op)))
1152 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1153 if (temp)
1154 return temp;
1157 /* A truncate of a comparison can be replaced with a subreg if
1158 STORE_FLAG_VALUE permits. This is like the previous test,
1159 but it works even if the comparison is done in a mode larger
1160 than HOST_BITS_PER_WIDE_INT. */
1161 if (HWI_COMPUTABLE_MODE_P (mode)
1162 && COMPARISON_P (op)
1163 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1165 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1166 if (temp)
1167 return temp;
1170 /* A truncate of a memory is just loading the low part of the memory
1171 if we are not changing the meaning of the address. */
1172 if (GET_CODE (op) == MEM
1173 && !VECTOR_MODE_P (mode)
1174 && !MEM_VOLATILE_P (op)
1175 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1177 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1178 if (temp)
1179 return temp;
1182 break;
1184 case FLOAT_TRUNCATE:
1185 if (DECIMAL_FLOAT_MODE_P (mode))
1186 break;
1188 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1189 if (GET_CODE (op) == FLOAT_EXTEND
1190 && GET_MODE (XEXP (op, 0)) == mode)
1191 return XEXP (op, 0);
1193 /* (float_truncate:SF (float_truncate:DF foo:XF))
1194 = (float_truncate:SF foo:XF).
1195 This may eliminate double rounding, so it is unsafe.
1197 (float_truncate:SF (float_extend:XF foo:DF))
1198 = (float_truncate:SF foo:DF).
1200 (float_truncate:DF (float_extend:XF foo:SF))
1201 = (float_extend:DF foo:SF). */
1202 if ((GET_CODE (op) == FLOAT_TRUNCATE
1203 && flag_unsafe_math_optimizations)
1204 || GET_CODE (op) == FLOAT_EXTEND)
1205 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1206 0)))
1207 > GET_MODE_SIZE (mode)
1208 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1209 mode,
1210 XEXP (op, 0), mode);
1212 /* (float_truncate (float x)) is (float x) */
1213 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1214 && (flag_unsafe_math_optimizations
1215 || exact_int_to_float_conversion_p (op)))
1216 return simplify_gen_unary (GET_CODE (op), mode,
1217 XEXP (op, 0),
1218 GET_MODE (XEXP (op, 0)));
1220 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1221 (OP:SF foo:SF) if OP is NEG or ABS. */
1222 if ((GET_CODE (op) == ABS
1223 || GET_CODE (op) == NEG)
1224 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1225 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1226 return simplify_gen_unary (GET_CODE (op), mode,
1227 XEXP (XEXP (op, 0), 0), mode);
1229 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1230 is (float_truncate:SF x). */
1231 if (GET_CODE (op) == SUBREG
1232 && subreg_lowpart_p (op)
1233 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1234 return SUBREG_REG (op);
1235 break;
1237 case FLOAT_EXTEND:
1238 if (DECIMAL_FLOAT_MODE_P (mode))
1239 break;
1241 /* (float_extend (float_extend x)) is (float_extend x)
1243 (float_extend (float x)) is (float x) assuming that double
1244        rounding can't happen.  */
1246 if (GET_CODE (op) == FLOAT_EXTEND
1247 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1248 && exact_int_to_float_conversion_p (op)))
1249 return simplify_gen_unary (GET_CODE (op), mode,
1250 XEXP (op, 0),
1251 GET_MODE (XEXP (op, 0)));
1253 break;
1255 case ABS:
1256 /* (abs (neg <foo>)) -> (abs <foo>) */
1257 if (GET_CODE (op) == NEG)
1258 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1259 GET_MODE (XEXP (op, 0)));
1261 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1262 do nothing. */
1263 if (GET_MODE (op) == VOIDmode)
1264 break;
1266 /* If operand is something known to be positive, ignore the ABS. */
1267 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1268 || val_signbit_known_clear_p (GET_MODE (op),
1269 nonzero_bits (op, GET_MODE (op))))
1270 return op;
1272 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1273 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1274 return gen_rtx_NEG (mode, op);
1276 break;
1278 case FFS:
1279 /* (ffs (*_extend <X>)) = (ffs <X>) */
1280 if (GET_CODE (op) == SIGN_EXTEND
1281 || GET_CODE (op) == ZERO_EXTEND)
1282 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1286 case POPCOUNT:
1287 switch (GET_CODE (op))
1289 case BSWAP:
1290 case ZERO_EXTEND:
1291 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1292 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1295 case ROTATE:
1296 case ROTATERT:
1297 /* Rotations don't affect popcount. */
1298 if (!side_effects_p (XEXP (op, 1)))
1299 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 default:
1304 break;
1306 break;
1308 case PARITY:
1309 switch (GET_CODE (op))
1311 case NOT:
1312 case BSWAP:
1313 case ZERO_EXTEND:
1314 case SIGN_EXTEND:
1315 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1316 GET_MODE (XEXP (op, 0)));
1318 case ROTATE:
1319 case ROTATERT:
1320 /* Rotations don't affect parity. */
1321 if (!side_effects_p (XEXP (op, 1)))
1322 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1324 break;
1326 default:
1327 break;
1329 break;
1331 case BSWAP:
1332 /* (bswap (bswap x)) -> x. */
1333 if (GET_CODE (op) == BSWAP)
1334 return XEXP (op, 0);
1335 break;
1337 case FLOAT:
1338 /* (float (sign_extend <X>)) = (float <X>). */
1339 if (GET_CODE (op) == SIGN_EXTEND)
1340 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1341 GET_MODE (XEXP (op, 0)));
1342 break;
1344 case SIGN_EXTEND:
1345 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1346 becomes just the MINUS if its mode is MODE. This allows
1347 folding switch statements on machines using casesi (such as
1348 the VAX). */
1349 if (GET_CODE (op) == TRUNCATE
1350 && GET_MODE (XEXP (op, 0)) == mode
1351 && GET_CODE (XEXP (op, 0)) == MINUS
1352 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1353 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1354 return XEXP (op, 0);
1356 /* Extending a widening multiplication should be canonicalized to
1357 a wider widening multiplication. */
1358 if (GET_CODE (op) == MULT)
1360 rtx lhs = XEXP (op, 0);
1361 rtx rhs = XEXP (op, 1);
1362 enum rtx_code lcode = GET_CODE (lhs);
1363 enum rtx_code rcode = GET_CODE (rhs);
1365 /* Widening multiplies usually extend both operands, but sometimes
1366 they use a shift to extract a portion of a register. */
1367 if ((lcode == SIGN_EXTEND
1368 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1369 && (rcode == SIGN_EXTEND
1370 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1372 machine_mode lmode = GET_MODE (lhs);
1373 machine_mode rmode = GET_MODE (rhs);
1374 int bits;
1376 if (lcode == ASHIFTRT)
1377 /* Number of bits not shifted off the end. */
1378 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1379 else /* lcode == SIGN_EXTEND */
1380 /* Size of inner mode. */
1381 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1383 if (rcode == ASHIFTRT)
1384 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1385 else /* rcode == SIGN_EXTEND */
1386 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1388        /* We can only widen multiplies if the result is mathematically
1389 equivalent. I.e. if overflow was impossible. */
1390 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1391 return simplify_gen_binary
1392 (MULT, mode,
1393 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1394 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
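/* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
   (sign_extend:SI y:HI))) has bits = 16 + 16 <= 32, so it becomes
   (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */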
1398 /* Check for a sign extension of a subreg of a promoted
1399 variable, where the promotion is sign-extended, and the
1400 target mode is the same as the variable's promotion. */
1401 if (GET_CODE (op) == SUBREG
1402 && SUBREG_PROMOTED_VAR_P (op)
1403 && SUBREG_PROMOTED_SIGNED_P (op)
1404 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1406 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1407 if (temp)
1408 return temp;
1411 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1412 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1413 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1415 gcc_assert (GET_MODE_PRECISION (mode)
1416 > GET_MODE_PRECISION (GET_MODE (op)));
1417 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1418 GET_MODE (XEXP (op, 0)));
1421 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1422 is (sign_extend:M (subreg:O <X>)) if there is mode with
1423 GET_MODE_BITSIZE (N) - I bits.
1424 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1425 is similarly (zero_extend:M (subreg:O <X>)). */
1426 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1427 && GET_CODE (XEXP (op, 0)) == ASHIFT
1428 && CONST_INT_P (XEXP (op, 1))
1429 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1430 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1432 machine_mode tmode
1433 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1434 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1435 gcc_assert (GET_MODE_BITSIZE (mode)
1436 > GET_MODE_BITSIZE (GET_MODE (op)));
1437 if (tmode != BLKmode)
1439 rtx inner =
1440 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1441 if (inner)
1442 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1443 ? SIGN_EXTEND : ZERO_EXTEND,
1444 mode, inner, tmode);
1448 #if defined(POINTERS_EXTEND_UNSIGNED)
1449 /* As we do not know which address space the pointer is referring to,
1450 we can do this only if the target does not support different pointer
1451 or address modes depending on the address space. */
1452 if (target_default_pointer_address_modes_p ()
1453 && ! POINTERS_EXTEND_UNSIGNED
1454 && mode == Pmode && GET_MODE (op) == ptr_mode
1455 && (CONSTANT_P (op)
1456 || (GET_CODE (op) == SUBREG
1457 && REG_P (SUBREG_REG (op))
1458 && REG_POINTER (SUBREG_REG (op))
1459 && GET_MODE (SUBREG_REG (op)) == Pmode))
1460 && !targetm.have_ptr_extend ())
1461 return convert_memory_address (Pmode, op);
1462 #endif
1463 break;
1465 case ZERO_EXTEND:
1466 /* Check for a zero extension of a subreg of a promoted
1467 variable, where the promotion is zero-extended, and the
1468 target mode is the same as the variable's promotion. */
1469 if (GET_CODE (op) == SUBREG
1470 && SUBREG_PROMOTED_VAR_P (op)
1471 && SUBREG_PROMOTED_UNSIGNED_P (op)
1472 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1474 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1475 if (temp)
1476 return temp;
1479 /* Extending a widening multiplication should be canonicalized to
1480 a wider widening multiplication. */
1481 if (GET_CODE (op) == MULT)
1483 rtx lhs = XEXP (op, 0);
1484 rtx rhs = XEXP (op, 1);
1485 enum rtx_code lcode = GET_CODE (lhs);
1486 enum rtx_code rcode = GET_CODE (rhs);
1488 /* Widening multiplies usually extend both operands, but sometimes
1489 they use a shift to extract a portion of a register. */
1490 if ((lcode == ZERO_EXTEND
1491 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1492 && (rcode == ZERO_EXTEND
1493 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1495 machine_mode lmode = GET_MODE (lhs);
1496 machine_mode rmode = GET_MODE (rhs);
1497 int bits;
1499 if (lcode == LSHIFTRT)
1500 /* Number of bits not shifted off the end. */
1501 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1502 else /* lcode == ZERO_EXTEND */
1503 /* Size of inner mode. */
1504 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1506 if (rcode == LSHIFTRT)
1507 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1508 else /* rcode == ZERO_EXTEND */
1509 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1511        /* We can only widen multiplies if the result is mathematically
1512 equivalent. I.e. if overflow was impossible. */
1513 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1514 return simplify_gen_binary
1515 (MULT, mode,
1516 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1517 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
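/* For example, (zero_extend:DI (mult:SI (zero_extend:SI x:QI)
   (zero_extend:SI y:QI))) has bits = 8 + 8 <= 32, so it becomes
   (mult:DI (zero_extend:DI x:QI) (zero_extend:DI y:QI)).  */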
1521 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1522 if (GET_CODE (op) == ZERO_EXTEND)
1523 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1524 GET_MODE (XEXP (op, 0)));
1526 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1527 is (zero_extend:M (subreg:O <X>)) if there is mode with
1528 GET_MODE_PRECISION (N) - I bits. */
1529 if (GET_CODE (op) == LSHIFTRT
1530 && GET_CODE (XEXP (op, 0)) == ASHIFT
1531 && CONST_INT_P (XEXP (op, 1))
1532 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1533 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1535 machine_mode tmode
1536 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1537 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1538 if (tmode != BLKmode)
1540 rtx inner =
1541 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1542 if (inner)
1543 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1547 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1548 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1549 of mode N. E.g.
1550 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1551 (and:SI (reg:SI) (const_int 63)). */
1552 if (GET_CODE (op) == SUBREG
1553 && GET_MODE_PRECISION (GET_MODE (op))
1554 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1555 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1556 <= HOST_BITS_PER_WIDE_INT
1557 && GET_MODE_PRECISION (mode)
1558 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1559 && subreg_lowpart_p (op)
1560 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1561 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1563 if (GET_MODE_PRECISION (mode)
1564 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1565 return SUBREG_REG (op);
1566 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1567 GET_MODE (SUBREG_REG (op)));
1570 #if defined(POINTERS_EXTEND_UNSIGNED)
1571 /* As we do not know which address space the pointer is referring to,
1572 we can do this only if the target does not support different pointer
1573 or address modes depending on the address space. */
1574 if (target_default_pointer_address_modes_p ()
1575 && POINTERS_EXTEND_UNSIGNED > 0
1576 && mode == Pmode && GET_MODE (op) == ptr_mode
1577 && (CONSTANT_P (op)
1578 || (GET_CODE (op) == SUBREG
1579 && REG_P (SUBREG_REG (op))
1580 && REG_POINTER (SUBREG_REG (op))
1581 && GET_MODE (SUBREG_REG (op)) == Pmode))
1582 && !targetm.have_ptr_extend ())
1583 return convert_memory_address (Pmode, op);
1584 #endif
1585 break;
1587 default:
1588 break;
1591 return 0;
1594 /* Try to compute the value of a unary operation CODE whose output mode is to
1595 be MODE with input operand OP whose mode was originally OP_MODE.
1596 Return zero if the value cannot be computed. */
1598 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1599 rtx op, machine_mode op_mode)
1601 unsigned int width = GET_MODE_PRECISION (mode);
1603 if (code == VEC_DUPLICATE)
1605 gcc_assert (VECTOR_MODE_P (mode));
1606 if (GET_MODE (op) != VOIDmode)
1608 if (!VECTOR_MODE_P (GET_MODE (op)))
1609 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1610 else
1611 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1612 (GET_MODE (op)));
1614 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1615 || GET_CODE (op) == CONST_VECTOR)
1617 int elt_size = GET_MODE_UNIT_SIZE (mode);
1618 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1619 rtvec v = rtvec_alloc (n_elts);
1620 unsigned int i;
1622 if (GET_CODE (op) != CONST_VECTOR)
1623 for (i = 0; i < n_elts; i++)
1624 RTVEC_ELT (v, i) = op;
1625 else
1627 machine_mode inmode = GET_MODE (op);
1628 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1629 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1631 gcc_assert (in_n_elts < n_elts);
1632 gcc_assert ((n_elts % in_n_elts) == 0);
1633 for (i = 0; i < n_elts; i++)
1634 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1636 return gen_rtx_CONST_VECTOR (mode, v);
1640 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1642 int elt_size = GET_MODE_UNIT_SIZE (mode);
1643 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1644 machine_mode opmode = GET_MODE (op);
1645 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1646 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1647 rtvec v = rtvec_alloc (n_elts);
1648 unsigned int i;
1650 gcc_assert (op_n_elts == n_elts);
1651 for (i = 0; i < n_elts; i++)
1653 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1654 CONST_VECTOR_ELT (op, i),
1655 GET_MODE_INNER (opmode));
1656 if (!x)
1657 return 0;
1658 RTVEC_ELT (v, i) = x;
1660 return gen_rtx_CONST_VECTOR (mode, v);
1663 /* The order of these tests is critical so that, for example, we don't
1664 check the wrong mode (input vs. output) for a conversion operation,
1665 such as FIX. At some point, this should be simplified. */
1667 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1669 REAL_VALUE_TYPE d;
1671 if (op_mode == VOIDmode)
1673      /* CONST_INTs have VOIDmode as the mode.  We assume that all
1674         the bits of the constant are significant, though this is
1675         a dangerous assumption: CONST_INTs are often created and
1676         used with garbage in the bits outside of the precision of
1677         the implied mode of the const_int.  */
1678 op_mode = MAX_MODE_INT;
1681 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1682 d = real_value_truncate (mode, d);
1683 return const_double_from_real_value (d, mode);
1685 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1687 REAL_VALUE_TYPE d;
1689 if (op_mode == VOIDmode)
1691      /* CONST_INTs have VOIDmode as the mode.  We assume that all
1692         the bits of the constant are significant, though this is
1693         a dangerous assumption: CONST_INTs are often created and
1694         used with garbage in the bits outside of the precision of
1695         the implied mode of the const_int.  */
1696 op_mode = MAX_MODE_INT;
1699 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1700 d = real_value_truncate (mode, d);
1701 return const_double_from_real_value (d, mode);
1704 if (CONST_SCALAR_INT_P (op) && width > 0)
1706 wide_int result;
1707 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1708 rtx_mode_t op0 = std::make_pair (op, imode);
1709 int int_value;
1711 #if TARGET_SUPPORTS_WIDE_INT == 0
1712      /* This assert keeps the simplification from producing a result
1713         that cannot be represented in a CONST_DOUBLE.  A lot of
1714         upstream callers expect that this function never fails to
1715         simplify something, so if you moved this check into the test
1716         above, the code would just die later anyway.  If this assert
1717         fires, you need to make the port support wide int.  */
1718 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1719 #endif
1721 switch (code)
1723 case NOT:
1724 result = wi::bit_not (op0);
1725 break;
1727 case NEG:
1728 result = wi::neg (op0);
1729 break;
1731 case ABS:
1732 result = wi::abs (op0);
1733 break;
1735 case FFS:
1736 result = wi::shwi (wi::ffs (op0), mode);
1737 break;
1739 case CLZ:
1740 if (wi::ne_p (op0, 0))
1741 int_value = wi::clz (op0);
1742 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1743 int_value = GET_MODE_PRECISION (mode);
1744 result = wi::shwi (int_value, mode);
1745 break;
1747 case CLRSB:
1748 result = wi::shwi (wi::clrsb (op0), mode);
1749 break;
1751 case CTZ:
1752 if (wi::ne_p (op0, 0))
1753 int_value = wi::ctz (op0);
1754 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1755 int_value = GET_MODE_PRECISION (mode);
1756 result = wi::shwi (int_value, mode);
1757 break;
1759 case POPCOUNT:
1760 result = wi::shwi (wi::popcount (op0), mode);
1761 break;
1763 case PARITY:
1764 result = wi::shwi (wi::parity (op0), mode);
1765 break;
1767 case BSWAP:
1768 result = wide_int (op0).bswap ();
1769 break;
1771 case TRUNCATE:
1772 case ZERO_EXTEND:
1773 result = wide_int::from (op0, width, UNSIGNED);
1774 break;
1776 case SIGN_EXTEND:
1777 result = wide_int::from (op0, width, SIGNED);
1778 break;
1780 case SQRT:
1781 default:
1782 return 0;
1785 return immed_wide_int_const (result, mode);
1788 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1789 && SCALAR_FLOAT_MODE_P (mode)
1790 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1792 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1793 switch (code)
1795 case SQRT:
1796 return 0;
1797 case ABS:
1798 d = real_value_abs (&d);
1799 break;
1800 case NEG:
1801 d = real_value_negate (&d);
1802 break;
1803 case FLOAT_TRUNCATE:
1804 d = real_value_truncate (mode, d);
1805 break;
1806 case FLOAT_EXTEND:
1807 /* All this does is change the mode, unless changing
1808 mode class. */
1809 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1810 real_convert (&d, mode, &d);
1811 break;
1812 case FIX:
1813 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1814 break;
1815 case NOT:
1817 long tmp[4];
1818 int i;
1820 real_to_target (tmp, &d, GET_MODE (op));
1821 for (i = 0; i < 4; i++)
1822 tmp[i] = ~tmp[i];
1823 real_from_target (&d, tmp, mode);
1824 break;
1826 default:
1827 gcc_unreachable ();
1829 return const_double_from_real_value (d, mode);
1831 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1832 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1833 && GET_MODE_CLASS (mode) == MODE_INT
1834 && width > 0)
1836 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1837 operators are intentionally left unspecified (to ease implementation
1838 by target backends), for consistency, this routine implements the
1839 same semantics for constant folding as used by the middle-end. */
1841 /* This was formerly used only for non-IEEE float.
1842 eggert@twinsun.com says it is safe for IEEE also. */
1843 REAL_VALUE_TYPE t;
1844 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1845 wide_int wmax, wmin;
1846 /* This is part of the ABI of real_to_integer, but we check
1847 things before making this call. */
1848 bool fail;
1850 switch (code)
1852 case FIX:
1853 if (REAL_VALUE_ISNAN (*x))
1854 return const0_rtx;
1856 /* Test against the signed upper bound. */
1857 wmax = wi::max_value (width, SIGNED);
1858 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1859 if (real_less (&t, x))
1860 return immed_wide_int_const (wmax, mode);
1862 /* Test against the signed lower bound. */
1863 wmin = wi::min_value (width, SIGNED);
1864 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1865 if (real_less (x, &t))
1866 return immed_wide_int_const (wmin, mode);
1868 return immed_wide_int_const (real_to_integer (x, &fail, width),
1869 mode);
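 /* Illustration of the FIX handling above: in a 32-bit SImode, 3.7
    folds to (const_int 3) and -3.7 to (const_int -3) (truncation
    toward zero), while a value above the signed maximum, e.g. 1e30,
    is clamped to wmax, i.e. (const_int 2147483647). */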
1871 case UNSIGNED_FIX:
1872 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1873 return const0_rtx;
1875 /* Test against the unsigned upper bound. */
1876 wmax = wi::max_value (width, UNSIGNED);
1877 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1878 if (real_less (&t, x))
1879 return immed_wide_int_const (wmax, mode);
1881 return immed_wide_int_const (real_to_integer (x, &fail, width),
1882 mode);
1884 default:
1885 gcc_unreachable ();
1889 return NULL_RTX;
1892 /* Subroutine of simplify_binary_operation to simplify a binary operation
1893 CODE that can commute with byte swapping, with result mode MODE and
1894 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1895 Return zero if no simplification or canonicalization is possible. */
1897 static rtx
1898 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1899 rtx op0, rtx op1)
1901 rtx tem;
1903 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1904 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1906 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1907 simplify_gen_unary (BSWAP, mode, op1, mode));
1908 return simplify_gen_unary (BSWAP, mode, tem, mode);
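 /* For example, with code == AND in a 32-bit SImode this rewrites
    (and (bswap:SI x) (const_int 0xff)) as
    (bswap:SI (and:SI x (const_int 0xff000000))), byte-swapping the
    constant so the AND is applied before the BSWAP. */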
1911 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1912 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1914 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1915 return simplify_gen_unary (BSWAP, mode, tem, mode);
1918 return NULL_RTX;
1921 /* Subroutine of simplify_binary_operation to simplify a commutative,
1922 associative binary operation CODE with result mode MODE, operating
1923 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1924 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1925 canonicalization is possible. */
1927 static rtx
1928 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1929 rtx op0, rtx op1)
1931 rtx tem;
1933 /* Linearize the operator to the left. */
1934 if (GET_CODE (op1) == code)
1936 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
1937 if (GET_CODE (op0) == code)
1939 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1940 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1943 /* "a op (b op c)" becomes "(b op c) op a". */
1944 if (! swap_commutative_operands_p (op1, op0))
1945 return simplify_gen_binary (code, mode, op1, op0);
1947 std::swap (op0, op1);
1950 if (GET_CODE (op0) == code)
1952 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1953 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1955 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1956 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1959 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1960 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1961 if (tem != 0)
1962 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1964 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1965 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1966 if (tem != 0)
1967 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1970 return 0;
1974 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1975 and OP1. Return 0 if no simplification is possible.
1977 Don't use this for relational operations such as EQ or LT.
1978 Use simplify_relational_operation instead. */
1980 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1981 rtx op0, rtx op1)
1983 rtx trueop0, trueop1;
1984 rtx tem;
1986 /* Relational operations don't work here. We must know the mode
1987 of the operands in order to do the comparison correctly.
1988 Assuming a full word can give incorrect results.
1989 Consider comparing 128 with -128 in QImode. */
1990 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1991 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1993 /* Make sure the constant is second. */
1994 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1995 && swap_commutative_operands_p (op0, op1))
1996 std::swap (op0, op1);
1998 trueop0 = avoid_constant_pool_reference (op0);
1999 trueop1 = avoid_constant_pool_reference (op1);
2001 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2002 if (tem)
2003 return tem;
2004 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2006 if (tem)
2007 return tem;
2009 /* If the above steps did not result in a simplification and op0 or op1
2010 were constant pool references, use the referenced constants directly. */
2011 if (trueop0 != op0 || trueop1 != op1)
2012 return simplify_gen_binary (code, mode, trueop0, trueop1);
2014 return NULL_RTX;
2017 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2018 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2019 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2020 actual constants. */
2022 static rtx
2023 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2024 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2026 rtx tem, reversed, opleft, opright;
2027 HOST_WIDE_INT val;
2028 unsigned int width = GET_MODE_PRECISION (mode);
2030 /* Even if we can't compute a constant result,
2031 there are some cases worth simplifying. */
2033 switch (code)
2035 case PLUS:
2036 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2037 when x is NaN, infinite, or finite and nonzero. They aren't
2038 when x is -0 and the rounding mode is not towards -infinity,
2039 since (-0) + 0 is then 0. */
2040 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2041 return op0;
2043 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2044 transformations are safe even for IEEE. */
2045 if (GET_CODE (op0) == NEG)
2046 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2047 else if (GET_CODE (op1) == NEG)
2048 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2050 /* (~a) + 1 -> -a */
2051 if (INTEGRAL_MODE_P (mode)
2052 && GET_CODE (op0) == NOT
2053 && trueop1 == const1_rtx)
2054 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2056 /* Handle both-operands-constant cases. We can only add
2057 CONST_INTs to constants since the sum of relocatable symbols
2058 can't be handled by most assemblers. Don't add CONST_INT
2059 to CONST_INT since overflow won't be computed properly if wider
2060 than HOST_BITS_PER_WIDE_INT. */
2062 if ((GET_CODE (op0) == CONST
2063 || GET_CODE (op0) == SYMBOL_REF
2064 || GET_CODE (op0) == LABEL_REF)
2065 && CONST_INT_P (op1))
2066 return plus_constant (mode, op0, INTVAL (op1));
2067 else if ((GET_CODE (op1) == CONST
2068 || GET_CODE (op1) == SYMBOL_REF
2069 || GET_CODE (op1) == LABEL_REF)
2070 && CONST_INT_P (op0))
2071 return plus_constant (mode, op1, INTVAL (op0));
2073 /* See if this is something like X * C - X or vice versa or
2074 if the multiplication is written as a shift. If so, we can
2075 distribute and make a new multiply, shift, or maybe just
2076 have X (if C is 2 in the example above). But don't make
2077 something more expensive than we had before. */
2079 if (SCALAR_INT_MODE_P (mode))
2081 rtx lhs = op0, rhs = op1;
2083 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2084 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2086 if (GET_CODE (lhs) == NEG)
2088 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2089 lhs = XEXP (lhs, 0);
2091 else if (GET_CODE (lhs) == MULT
2092 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2094 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2095 lhs = XEXP (lhs, 0);
2097 else if (GET_CODE (lhs) == ASHIFT
2098 && CONST_INT_P (XEXP (lhs, 1))
2099 && INTVAL (XEXP (lhs, 1)) >= 0
2100 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2102 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2103 GET_MODE_PRECISION (mode));
2104 lhs = XEXP (lhs, 0);
2107 if (GET_CODE (rhs) == NEG)
2109 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2110 rhs = XEXP (rhs, 0);
2112 else if (GET_CODE (rhs) == MULT
2113 && CONST_INT_P (XEXP (rhs, 1)))
2115 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2116 rhs = XEXP (rhs, 0);
2118 else if (GET_CODE (rhs) == ASHIFT
2119 && CONST_INT_P (XEXP (rhs, 1))
2120 && INTVAL (XEXP (rhs, 1)) >= 0
2121 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2123 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2124 GET_MODE_PRECISION (mode));
2125 rhs = XEXP (rhs, 0);
2128 if (rtx_equal_p (lhs, rhs))
2130 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2131 rtx coeff;
2132 bool speed = optimize_function_for_speed_p (cfun);
2134 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2136 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2137 return (set_src_cost (tem, mode, speed)
2138 <= set_src_cost (orig, mode, speed) ? tem : 0);
2142 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2143 if (CONST_SCALAR_INT_P (op1)
2144 && GET_CODE (op0) == XOR
2145 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2146 && mode_signbit_p (mode, op1))
2147 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2148 simplify_gen_binary (XOR, mode, op1,
2149 XEXP (op0, 1)));
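 /* Worked example for the rule above: in QImode, where the sign bit
    is 0x80, (plus (xor X (const_int 0x0f)) (const_int 0x80)) becomes
    (xor X (const_int 0x8f)); adding the sign bit modulo 256 is the
    same as XORing with it. */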
2151 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2152 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2153 && GET_CODE (op0) == MULT
2154 && GET_CODE (XEXP (op0, 0)) == NEG)
2156 rtx in1, in2;
2158 in1 = XEXP (XEXP (op0, 0), 0);
2159 in2 = XEXP (op0, 1);
2160 return simplify_gen_binary (MINUS, mode, op1,
2161 simplify_gen_binary (MULT, mode,
2162 in1, in2));
2165 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2166 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2167 is 1. */
2168 if (COMPARISON_P (op0)
2169 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2170 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2171 && (reversed = reversed_comparison (op0, mode)))
2172 return
2173 simplify_gen_unary (NEG, mode, reversed, mode);
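 /* E.g. when STORE_FLAG_VALUE is 1, (plus (eq A B) (const_int -1))
    becomes (neg (ne A B)): both are 0 when A == B and -1 otherwise. */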
2175 /* If one of the operands is a PLUS or a MINUS, see if we can
2176 simplify this by the associative law.
2177 Don't use the associative law for floating point.
2178 The inaccuracy makes it nonassociative,
2179 and subtle programs can break if operations are associated. */
2181 if (INTEGRAL_MODE_P (mode)
2182 && (plus_minus_operand_p (op0)
2183 || plus_minus_operand_p (op1))
2184 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2185 return tem;
2187 /* Reassociate floating point addition only when the user
2188 specifies associative math operations. */
2189 if (FLOAT_MODE_P (mode)
2190 && flag_associative_math)
2192 tem = simplify_associative_operation (code, mode, op0, op1);
2193 if (tem)
2194 return tem;
2196 break;
2198 case COMPARE:
2199 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2200 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2201 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2202 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2204 rtx xop00 = XEXP (op0, 0);
2205 rtx xop10 = XEXP (op1, 0);
2207 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2208 return xop00;
2210 if (REG_P (xop00) && REG_P (xop10)
2211 && GET_MODE (xop00) == GET_MODE (xop10)
2212 && REGNO (xop00) == REGNO (xop10)
2213 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2214 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2215 return xop00;
2217 break;
2219 case MINUS:
2220 /* We can't assume x-x is 0 even with non-IEEE floating point,
2221 but since it is zero except in very strange circumstances, we
2222 will treat it as zero with -ffinite-math-only. */
2223 if (rtx_equal_p (trueop0, trueop1)
2224 && ! side_effects_p (op0)
2225 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2226 return CONST0_RTX (mode);
2228 /* Change subtraction from zero into negation. (0 - x) is the
2229 same as -x when x is NaN, infinite, or finite and nonzero.
2230 But if the mode has signed zeros, and does not round towards
2231 -infinity, then 0 - 0 is 0, not -0. */
2232 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2233 return simplify_gen_unary (NEG, mode, op1, mode);
2235 /* (-1 - a) is ~a. */
2236 if (trueop0 == constm1_rtx)
2237 return simplify_gen_unary (NOT, mode, op1, mode);
2239 /* Subtracting 0 has no effect unless the mode has signed zeros
2240 and supports rounding towards -infinity. In such a case,
2241 0 - 0 is -0. */
2242 if (!(HONOR_SIGNED_ZEROS (mode)
2243 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2244 && trueop1 == CONST0_RTX (mode))
2245 return op0;
2247 /* See if this is something like X * C - X or vice versa or
2248 if the multiplication is written as a shift. If so, we can
2249 distribute and make a new multiply, shift, or maybe just
2250 have X (if C is 2 in the example above). But don't make
2251 something more expensive than we had before. */
2253 if (SCALAR_INT_MODE_P (mode))
2255 rtx lhs = op0, rhs = op1;
2257 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2258 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2260 if (GET_CODE (lhs) == NEG)
2262 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2263 lhs = XEXP (lhs, 0);
2265 else if (GET_CODE (lhs) == MULT
2266 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2268 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2269 lhs = XEXP (lhs, 0);
2271 else if (GET_CODE (lhs) == ASHIFT
2272 && CONST_INT_P (XEXP (lhs, 1))
2273 && INTVAL (XEXP (lhs, 1)) >= 0
2274 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2276 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2277 GET_MODE_PRECISION (mode));
2278 lhs = XEXP (lhs, 0);
2281 if (GET_CODE (rhs) == NEG)
2283 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2284 rhs = XEXP (rhs, 0);
2286 else if (GET_CODE (rhs) == MULT
2287 && CONST_INT_P (XEXP (rhs, 1)))
2289 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2290 rhs = XEXP (rhs, 0);
2292 else if (GET_CODE (rhs) == ASHIFT
2293 && CONST_INT_P (XEXP (rhs, 1))
2294 && INTVAL (XEXP (rhs, 1)) >= 0
2295 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2297 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2298 GET_MODE_PRECISION (mode));
2299 negcoeff1 = -negcoeff1;
2300 rhs = XEXP (rhs, 0);
2303 if (rtx_equal_p (lhs, rhs))
2305 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2306 rtx coeff;
2307 bool speed = optimize_function_for_speed_p (cfun);
2309 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2311 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2312 return (set_src_cost (tem, mode, speed)
2313 <= set_src_cost (orig, mode, speed) ? tem : 0);
2317 /* (a - (-b)) -> (a + b). True even for IEEE. */
2318 if (GET_CODE (op1) == NEG)
2319 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2321 /* (-x - c) may be simplified as (-c - x). */
2322 if (GET_CODE (op0) == NEG
2323 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2325 tem = simplify_unary_operation (NEG, mode, op1, mode);
2326 if (tem)
2327 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2330 /* Don't let a relocatable value get a negative coeff. */
2331 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2332 return simplify_gen_binary (PLUS, mode,
2333 op0,
2334 neg_const_int (mode, op1));
2336 /* (x - (x & y)) -> (x & ~y) */
2337 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2339 if (rtx_equal_p (op0, XEXP (op1, 0)))
2341 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2342 GET_MODE (XEXP (op1, 1)));
2343 return simplify_gen_binary (AND, mode, op0, tem);
2345 if (rtx_equal_p (op0, XEXP (op1, 1)))
2347 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2348 GET_MODE (XEXP (op1, 0)));
2349 return simplify_gen_binary (AND, mode, op0, tem);
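 /* A quick check of the identity used above: with x = 0b1101 and
    y = 0b1011, x & y = 0b1001, so x - (x & y) = 0b0100, which equals
    x & ~y; the subtraction never borrows because x & y only has bits
    that are also set in x. */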
2353 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2354 by reversing the comparison code if valid. */
2355 if (STORE_FLAG_VALUE == 1
2356 && trueop0 == const1_rtx
2357 && COMPARISON_P (op1)
2358 && (reversed = reversed_comparison (op1, mode)))
2359 return reversed;
2361 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2362 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2363 && GET_CODE (op1) == MULT
2364 && GET_CODE (XEXP (op1, 0)) == NEG)
2366 rtx in1, in2;
2368 in1 = XEXP (XEXP (op1, 0), 0);
2369 in2 = XEXP (op1, 1);
2370 return simplify_gen_binary (PLUS, mode,
2371 simplify_gen_binary (MULT, mode,
2372 in1, in2),
2373 op0);
2376 /* Canonicalize (minus (neg A) (mult B C)) to
2377 (minus (mult (neg B) C) A). */
2378 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2379 && GET_CODE (op1) == MULT
2380 && GET_CODE (op0) == NEG)
2382 rtx in1, in2;
2384 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2385 in2 = XEXP (op1, 1);
2386 return simplify_gen_binary (MINUS, mode,
2387 simplify_gen_binary (MULT, mode,
2388 in1, in2),
2389 XEXP (op0, 0));
2392 /* If one of the operands is a PLUS or a MINUS, see if we can
2393 simplify this by the associative law. This will, for example,
2394 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2395 Don't use the associative law for floating point.
2396 The inaccuracy makes it nonassociative,
2397 and subtle programs can break if operations are associated. */
2399 if (INTEGRAL_MODE_P (mode)
2400 && (plus_minus_operand_p (op0)
2401 || plus_minus_operand_p (op1))
2402 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2403 return tem;
2404 break;
2406 case MULT:
2407 if (trueop1 == constm1_rtx)
2408 return simplify_gen_unary (NEG, mode, op0, mode);
2410 if (GET_CODE (op0) == NEG)
2412 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2413 /* If op1 is a MULT as well and simplify_unary_operation
2414 just moved the NEG to the second operand, simplify_gen_binary
2415 below could, through simplify_associative_operation, move
2416 the NEG around again and recurse endlessly. */
2417 if (temp
2418 && GET_CODE (op1) == MULT
2419 && GET_CODE (temp) == MULT
2420 && XEXP (op1, 0) == XEXP (temp, 0)
2421 && GET_CODE (XEXP (temp, 1)) == NEG
2422 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2423 temp = NULL_RTX;
2424 if (temp)
2425 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2427 if (GET_CODE (op1) == NEG)
2429 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2430 /* If op0 is a MULT as well and simplify_unary_operation
2431 just moved the NEG to the second operand, simplify_gen_binary
2432 below could, through simplify_associative_operation, move
2433 the NEG around again and recurse endlessly. */
2434 if (temp
2435 && GET_CODE (op0) == MULT
2436 && GET_CODE (temp) == MULT
2437 && XEXP (op0, 0) == XEXP (temp, 0)
2438 && GET_CODE (XEXP (temp, 1)) == NEG
2439 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2440 temp = NULL_RTX;
2441 if (temp)
2442 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2445 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2446 x is NaN, since x * 0 is then also NaN. Nor is it valid
2447 when the mode has signed zeros, since multiplying a negative
2448 number by 0 will give -0, not 0. */
2449 if (!HONOR_NANS (mode)
2450 && !HONOR_SIGNED_ZEROS (mode)
2451 && trueop1 == CONST0_RTX (mode)
2452 && ! side_effects_p (op0))
2453 return op1;
2455 /* In IEEE floating point, x*1 is not equivalent to x for
2456 signalling NaNs. */
2457 if (!HONOR_SNANS (mode)
2458 && trueop1 == CONST1_RTX (mode))
2459 return op0;
2461 /* Convert multiply by constant power of two into shift. */
2462 if (CONST_SCALAR_INT_P (trueop1))
2464 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2465 if (val >= 0)
2466 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
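 /* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)). */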
2469 /* x*2 is x+x and x*(-1) is -x */
2470 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2471 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2472 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2473 && GET_MODE (op0) == mode)
2475 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2477 if (real_equal (d1, &dconst2))
2478 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2480 if (!HONOR_SNANS (mode)
2481 && real_equal (d1, &dconstm1))
2482 return simplify_gen_unary (NEG, mode, op0, mode);
2485 /* Optimize -x * -x as x * x. */
2486 if (FLOAT_MODE_P (mode)
2487 && GET_CODE (op0) == NEG
2488 && GET_CODE (op1) == NEG
2489 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2490 && !side_effects_p (XEXP (op0, 0)))
2491 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2493 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2494 if (SCALAR_FLOAT_MODE_P (mode)
2495 && GET_CODE (op0) == ABS
2496 && GET_CODE (op1) == ABS
2497 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2498 && !side_effects_p (XEXP (op0, 0)))
2499 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2501 /* Reassociate multiplication, but for floating point MULTs
2502 only when the user specifies unsafe math optimizations. */
2503 if (! FLOAT_MODE_P (mode)
2504 || flag_unsafe_math_optimizations)
2506 tem = simplify_associative_operation (code, mode, op0, op1);
2507 if (tem)
2508 return tem;
2510 break;
2512 case IOR:
2513 if (trueop1 == CONST0_RTX (mode))
2514 return op0;
2515 if (INTEGRAL_MODE_P (mode)
2516 && trueop1 == CONSTM1_RTX (mode)
2517 && !side_effects_p (op0))
2518 return op1;
2519 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2520 return op0;
2521 /* A | (~A) -> -1 */
2522 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2523 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2524 && ! side_effects_p (op0)
2525 && SCALAR_INT_MODE_P (mode))
2526 return constm1_rtx;
2528 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2529 if (CONST_INT_P (op1)
2530 && HWI_COMPUTABLE_MODE_P (mode)
2531 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2532 && !side_effects_p (op0))
2533 return op1;
2535 /* Canonicalize (X & C1) | C2. */
2536 if (GET_CODE (op0) == AND
2537 && CONST_INT_P (trueop1)
2538 && CONST_INT_P (XEXP (op0, 1)))
2540 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2541 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2542 HOST_WIDE_INT c2 = INTVAL (trueop1);
2544 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2545 if ((c1 & c2) == c1
2546 && !side_effects_p (XEXP (op0, 0)))
2547 return trueop1;
2549 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2550 if (((c1|c2) & mask) == mask)
2551 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2553 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2554 if (((c1 & ~c2) & mask) != (c1 & mask))
2556 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2557 gen_int_mode (c1 & ~c2, mode));
2558 return simplify_gen_binary (IOR, mode, tem, op1);
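 /* Worked example for the canonicalization above: with C1 = 0x0f and
    C2 = 0x3c, none of the earlier cases apply and C1 & ~C2 = 0x03, so
    (ior (and X (const_int 0x0f)) (const_int 0x3c)) is rewritten as
    (ior (and X (const_int 0x03)) (const_int 0x3c)); bits 2 and 3 of X
    are forced to 1 by C2 either way. */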
2562 /* Convert (A & B) | A to A. */
2563 if (GET_CODE (op0) == AND
2564 && (rtx_equal_p (XEXP (op0, 0), op1)
2565 || rtx_equal_p (XEXP (op0, 1), op1))
2566 && ! side_effects_p (XEXP (op0, 0))
2567 && ! side_effects_p (XEXP (op0, 1)))
2568 return op1;
2570 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2571 mode size to (rotate A CX). */
2573 if (GET_CODE (op1) == ASHIFT
2574 || GET_CODE (op1) == SUBREG)
2576 opleft = op1;
2577 opright = op0;
2579 else
2581 opright = op1;
2582 opleft = op0;
2585 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2586 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2587 && CONST_INT_P (XEXP (opleft, 1))
2588 && CONST_INT_P (XEXP (opright, 1))
2589 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2590 == GET_MODE_PRECISION (mode)))
2591 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
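 /* For instance, assuming a 32-bit SImode,
    (ior (ashift:SI x (const_int 24)) (lshiftrt:SI x (const_int 8)))
    matches here (24 + 8 == 32) and becomes (rotate:SI x (const_int 24)). */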
2593 /* Same, but for ashift that has been "simplified" to a wider mode
2594 by simplify_shift_const. */
2596 if (GET_CODE (opleft) == SUBREG
2597 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2598 && GET_CODE (opright) == LSHIFTRT
2599 && GET_CODE (XEXP (opright, 0)) == SUBREG
2600 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2601 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2602 && (GET_MODE_SIZE (GET_MODE (opleft))
2603 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2604 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2605 SUBREG_REG (XEXP (opright, 0)))
2606 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2607 && CONST_INT_P (XEXP (opright, 1))
2608 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2609 == GET_MODE_PRECISION (mode)))
2610 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2611 XEXP (SUBREG_REG (opleft), 1));
2613 /* If we have (ior (and X C1) C2), simplify this by making
2614 C1 as small as possible if C1 actually changes. */
2615 if (CONST_INT_P (op1)
2616 && (HWI_COMPUTABLE_MODE_P (mode)
2617 || INTVAL (op1) > 0)
2618 && GET_CODE (op0) == AND
2619 && CONST_INT_P (XEXP (op0, 1))
2620 && CONST_INT_P (op1)
2621 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2623 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2624 gen_int_mode (UINTVAL (XEXP (op0, 1))
2625 & ~UINTVAL (op1),
2626 mode));
2627 return simplify_gen_binary (IOR, mode, tmp, op1);
2630 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2631 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2632 the PLUS does not affect any of the bits in OP1: then we can do
2633 the IOR as a PLUS and we can associate. This is valid if OP1
2634 can be safely shifted left C bits. */
2635 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2636 && GET_CODE (XEXP (op0, 0)) == PLUS
2637 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2638 && CONST_INT_P (XEXP (op0, 1))
2639 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2641 int count = INTVAL (XEXP (op0, 1));
2642 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2644 if (mask >> count == INTVAL (trueop1)
2645 && trunc_int_for_mode (mask, mode) == mask
2646 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2647 return simplify_gen_binary (ASHIFTRT, mode,
2648 plus_constant (mode, XEXP (op0, 0),
2649 mask),
2650 XEXP (op0, 1));
2653 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2654 if (tem)
2655 return tem;
2657 tem = simplify_associative_operation (code, mode, op0, op1);
2658 if (tem)
2659 return tem;
2660 break;
2662 case XOR:
2663 if (trueop1 == CONST0_RTX (mode))
2664 return op0;
2665 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2666 return simplify_gen_unary (NOT, mode, op0, mode);
2667 if (rtx_equal_p (trueop0, trueop1)
2668 && ! side_effects_p (op0)
2669 && GET_MODE_CLASS (mode) != MODE_CC)
2670 return CONST0_RTX (mode);
2672 /* Canonicalize XOR of the most significant bit to PLUS. */
2673 if (CONST_SCALAR_INT_P (op1)
2674 && mode_signbit_p (mode, op1))
2675 return simplify_gen_binary (PLUS, mode, op0, op1);
2676 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2677 if (CONST_SCALAR_INT_P (op1)
2678 && GET_CODE (op0) == PLUS
2679 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2680 && mode_signbit_p (mode, XEXP (op0, 1)))
2681 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2682 simplify_gen_binary (XOR, mode, op1,
2683 XEXP (op0, 1)));
2685 /* If we are XORing two things that have no bits in common,
2686 convert them into an IOR. This helps to detect rotation encoded
2687 using those methods and possibly other simplifications. */
2689 if (HWI_COMPUTABLE_MODE_P (mode)
2690 && (nonzero_bits (op0, mode)
2691 & nonzero_bits (op1, mode)) == 0)
2692 return (simplify_gen_binary (IOR, mode, op0, op1));
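 /* E.g. (xor (and x (const_int 0xf0)) (const_int 0x0f)) has no
    overlapping nonzero bits, so it is rewritten as
    (ior (and x (const_int 0xf0)) (const_int 0x0f)). */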
2694 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2695 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2696 (NOT y). */
2698 int num_negated = 0;
2700 if (GET_CODE (op0) == NOT)
2701 num_negated++, op0 = XEXP (op0, 0);
2702 if (GET_CODE (op1) == NOT)
2703 num_negated++, op1 = XEXP (op1, 0);
2705 if (num_negated == 2)
2706 return simplify_gen_binary (XOR, mode, op0, op1);
2707 else if (num_negated == 1)
2708 return simplify_gen_unary (NOT, mode,
2709 simplify_gen_binary (XOR, mode, op0, op1),
2710 mode);
2713 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2714 correspond to a machine insn or result in further simplifications
2715 if B is a constant. */
2717 if (GET_CODE (op0) == AND
2718 && rtx_equal_p (XEXP (op0, 1), op1)
2719 && ! side_effects_p (op1))
2720 return simplify_gen_binary (AND, mode,
2721 simplify_gen_unary (NOT, mode,
2722 XEXP (op0, 0), mode),
2723 op1);
2725 else if (GET_CODE (op0) == AND
2726 && rtx_equal_p (XEXP (op0, 0), op1)
2727 && ! side_effects_p (op1))
2728 return simplify_gen_binary (AND, mode,
2729 simplify_gen_unary (NOT, mode,
2730 XEXP (op0, 1), mode),
2731 op1);
2733 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2734 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2735 out bits inverted twice and not set by C. Similarly, given
2736 (xor (and (xor A B) C) D), simplify without inverting C in
2737 the xor operand: (xor (and A C) (B&C)^D). */
2739 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2740 && GET_CODE (XEXP (op0, 0)) == XOR
2741 && CONST_INT_P (op1)
2742 && CONST_INT_P (XEXP (op0, 1))
2743 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2745 enum rtx_code op = GET_CODE (op0);
2746 rtx a = XEXP (XEXP (op0, 0), 0);
2747 rtx b = XEXP (XEXP (op0, 0), 1);
2748 rtx c = XEXP (op0, 1);
2749 rtx d = op1;
2750 HOST_WIDE_INT bval = INTVAL (b);
2751 HOST_WIDE_INT cval = INTVAL (c);
2752 HOST_WIDE_INT dval = INTVAL (d);
2753 HOST_WIDE_INT xcval;
2755 if (op == IOR)
2756 xcval = ~cval;
2757 else
2758 xcval = cval;
2760 return simplify_gen_binary (XOR, mode,
2761 simplify_gen_binary (op, mode, a, c),
2762 gen_int_mode ((bval & xcval) ^ dval,
2763 mode));
2766 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2767 we can transform like this:
2768 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2769 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2770 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2771 Attempt a few simplifications when B and C are both constants. */
2772 if (GET_CODE (op0) == AND
2773 && CONST_INT_P (op1)
2774 && CONST_INT_P (XEXP (op0, 1)))
2776 rtx a = XEXP (op0, 0);
2777 rtx b = XEXP (op0, 1);
2778 rtx c = op1;
2779 HOST_WIDE_INT bval = INTVAL (b);
2780 HOST_WIDE_INT cval = INTVAL (c);
2782 /* Instead of computing ~A&C, we compute its negated value,
2783 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2784 optimize for sure. If it does not simplify, we still try
2785 to compute ~A&C below, but since that always allocates
2786 RTL, we don't try that before committing to returning a
2787 simplified expression. */
2788 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2789 GEN_INT (~cval));
2791 if ((~cval & bval) == 0)
2793 rtx na_c = NULL_RTX;
2794 if (n_na_c)
2795 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2796 else
2798 /* If ~A does not simplify, don't bother: we don't
2799 want to simplify 2 operations into 3, and if na_c
2800 were to simplify with na, n_na_c would have
2801 simplified as well. */
2802 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2803 if (na)
2804 na_c = simplify_gen_binary (AND, mode, na, c);
2807 /* Try to simplify ~A&C | ~B&C. */
2808 if (na_c != NULL_RTX)
2809 return simplify_gen_binary (IOR, mode, na_c,
2810 gen_int_mode (~bval & cval, mode));
2812 else
2814 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2815 if (n_na_c == CONSTM1_RTX (mode))
2817 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2818 gen_int_mode (~cval & bval,
2819 mode));
2820 return simplify_gen_binary (IOR, mode, a_nc_b,
2821 gen_int_mode (~bval & cval,
2822 mode));
2827 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2828 comparison if STORE_FLAG_VALUE is 1. */
2829 if (STORE_FLAG_VALUE == 1
2830 && trueop1 == const1_rtx
2831 && COMPARISON_P (op0)
2832 && (reversed = reversed_comparison (op0, mode)))
2833 return reversed;
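 /* E.g. with STORE_FLAG_VALUE == 1, (xor (eq A B) (const_int 1))
    becomes (ne A B). */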
2835 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2836 is (lt foo (const_int 0)), so we can perform the above
2837 simplification if STORE_FLAG_VALUE is 1. */
2839 if (STORE_FLAG_VALUE == 1
2840 && trueop1 == const1_rtx
2841 && GET_CODE (op0) == LSHIFTRT
2842 && CONST_INT_P (XEXP (op0, 1))
2843 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2844 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2846 /* (xor (comparison foo bar) (const_int sign-bit))
2847 when STORE_FLAG_VALUE is the sign bit. */
2848 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2849 && trueop1 == const_true_rtx
2850 && COMPARISON_P (op0)
2851 && (reversed = reversed_comparison (op0, mode)))
2852 return reversed;
2854 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2855 if (tem)
2856 return tem;
2858 tem = simplify_associative_operation (code, mode, op0, op1);
2859 if (tem)
2860 return tem;
2861 break;
2863 case AND:
2864 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2865 return trueop1;
2866 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2867 return op0;
2868 if (HWI_COMPUTABLE_MODE_P (mode))
2870 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2871 HOST_WIDE_INT nzop1;
2872 if (CONST_INT_P (trueop1))
2874 HOST_WIDE_INT val1 = INTVAL (trueop1);
2875 /* If we are turning off bits already known off in OP0, we need
2876 not do an AND. */
2877 if ((nzop0 & ~val1) == 0)
2878 return op0;
2880 nzop1 = nonzero_bits (trueop1, mode);
2881 /* If we are clearing all the nonzero bits, the result is zero. */
2882 if ((nzop1 & nzop0) == 0
2883 && !side_effects_p (op0) && !side_effects_p (op1))
2884 return CONST0_RTX (mode);
2886 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2887 && GET_MODE_CLASS (mode) != MODE_CC)
2888 return op0;
2889 /* A & (~A) -> 0 */
2890 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2891 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2892 && ! side_effects_p (op0)
2893 && GET_MODE_CLASS (mode) != MODE_CC)
2894 return CONST0_RTX (mode);
2896 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2897 there are no nonzero bits of C outside of X's mode. */
2898 if ((GET_CODE (op0) == SIGN_EXTEND
2899 || GET_CODE (op0) == ZERO_EXTEND)
2900 && CONST_INT_P (trueop1)
2901 && HWI_COMPUTABLE_MODE_P (mode)
2902 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2903 & UINTVAL (trueop1)) == 0)
2905 machine_mode imode = GET_MODE (XEXP (op0, 0));
2906 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2907 gen_int_mode (INTVAL (trueop1),
2908 imode));
2909 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2912 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2913 we might be able to further simplify the AND with X and potentially
2914 remove the truncation altogether. */
2915 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2917 rtx x = XEXP (op0, 0);
2918 machine_mode xmode = GET_MODE (x);
2919 tem = simplify_gen_binary (AND, xmode, x,
2920 gen_int_mode (INTVAL (trueop1), xmode));
2921 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2924 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2925 if (GET_CODE (op0) == IOR
2926 && CONST_INT_P (trueop1)
2927 && CONST_INT_P (XEXP (op0, 1)))
2929 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2930 return simplify_gen_binary (IOR, mode,
2931 simplify_gen_binary (AND, mode,
2932 XEXP (op0, 0), op1),
2933 gen_int_mode (tmp, mode));
2936 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2937 insn (and may simplify more). */
2938 if (GET_CODE (op0) == XOR
2939 && rtx_equal_p (XEXP (op0, 0), op1)
2940 && ! side_effects_p (op1))
2941 return simplify_gen_binary (AND, mode,
2942 simplify_gen_unary (NOT, mode,
2943 XEXP (op0, 1), mode),
2944 op1);
2946 if (GET_CODE (op0) == XOR
2947 && rtx_equal_p (XEXP (op0, 1), op1)
2948 && ! side_effects_p (op1))
2949 return simplify_gen_binary (AND, mode,
2950 simplify_gen_unary (NOT, mode,
2951 XEXP (op0, 0), mode),
2952 op1);
2954 /* Similarly for (~(A ^ B)) & A. */
2955 if (GET_CODE (op0) == NOT
2956 && GET_CODE (XEXP (op0, 0)) == XOR
2957 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2958 && ! side_effects_p (op1))
2959 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2961 if (GET_CODE (op0) == NOT
2962 && GET_CODE (XEXP (op0, 0)) == XOR
2963 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2964 && ! side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2967 /* Convert (A | B) & A to A. */
2968 if (GET_CODE (op0) == IOR
2969 && (rtx_equal_p (XEXP (op0, 0), op1)
2970 || rtx_equal_p (XEXP (op0, 1), op1))
2971 && ! side_effects_p (XEXP (op0, 0))
2972 && ! side_effects_p (XEXP (op0, 1)))
2973 return op1;
2975 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2976 ((A & N) + B) & M -> (A + B) & M
2977 Similarly if (N & M) == 0,
2978 ((A | N) + B) & M -> (A + B) & M
2979 and for - instead of + and/or ^ instead of |.
2980 Also, if (N & M) == 0, then
2981 (A +- N) & M -> A & M. */
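 /* A concrete case of the first form: with M = 0xff (cst == 8) and
    N = 0xffff, (and (plus (and A (const_int 0xffff)) B) (const_int 0xff))
    becomes (and (plus A B) (const_int 0xff)), since masking A with
    0xffff cannot change the low eight bits of the sum. */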
2982 if (CONST_INT_P (trueop1)
2983 && HWI_COMPUTABLE_MODE_P (mode)
2984 && ~UINTVAL (trueop1)
2985 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2986 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2988 rtx pmop[2];
2989 int which;
2991 pmop[0] = XEXP (op0, 0);
2992 pmop[1] = XEXP (op0, 1);
2994 if (CONST_INT_P (pmop[1])
2995 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2996 return simplify_gen_binary (AND, mode, pmop[0], op1);
2998 for (which = 0; which < 2; which++)
3000 tem = pmop[which];
3001 switch (GET_CODE (tem))
3003 case AND:
3004 if (CONST_INT_P (XEXP (tem, 1))
3005 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3006 == UINTVAL (trueop1))
3007 pmop[which] = XEXP (tem, 0);
3008 break;
3009 case IOR:
3010 case XOR:
3011 if (CONST_INT_P (XEXP (tem, 1))
3012 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3013 pmop[which] = XEXP (tem, 0);
3014 break;
3015 default:
3016 break;
3020 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3022 tem = simplify_gen_binary (GET_CODE (op0), mode,
3023 pmop[0], pmop[1]);
3024 return simplify_gen_binary (code, mode, tem, op1);
3028 /* (and X (ior (not X) Y)) -> (and X Y) */
3029 if (GET_CODE (op1) == IOR
3030 && GET_CODE (XEXP (op1, 0)) == NOT
3031 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3032 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3034 /* (and (ior (not X) Y) X) -> (and X Y) */
3035 if (GET_CODE (op0) == IOR
3036 && GET_CODE (XEXP (op0, 0)) == NOT
3037 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3038 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3040 /* (and X (ior Y (not X))) -> (and X Y) */
3041 if (GET_CODE (op1) == IOR
3042 && GET_CODE (XEXP (op1, 1)) == NOT
3043 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3044 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3046 /* (and (ior Y (not X)) X) -> (and X Y) */
3047 if (GET_CODE (op0) == IOR
3048 && GET_CODE (XEXP (op0, 1)) == NOT
3049 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3050 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3052 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3053 if (tem)
3054 return tem;
3056 tem = simplify_associative_operation (code, mode, op0, op1);
3057 if (tem)
3058 return tem;
3059 break;
3061 case UDIV:
3062 /* 0/x is 0 (or x&0 if x has side-effects). */
3063 if (trueop0 == CONST0_RTX (mode))
3065 if (side_effects_p (op1))
3066 return simplify_gen_binary (AND, mode, op1, trueop0);
3067 return trueop0;
3069 /* x/1 is x. */
3070 if (trueop1 == CONST1_RTX (mode))
3072 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3073 if (tem)
3074 return tem;
3076 /* Convert divide by power of two into shift. */
3077 if (CONST_INT_P (trueop1)
3078 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3079 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3080 break;
3082 case DIV:
3083 /* Handle floating point and integers separately. */
3084 if (SCALAR_FLOAT_MODE_P (mode))
3086 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3087 safe for modes with NaNs, since 0.0 / 0.0 will then be
3088 NaN rather than 0.0. Nor is it safe for modes with signed
3089 zeros, since dividing 0 by a negative number gives -0.0 */
3090 if (trueop0 == CONST0_RTX (mode)
3091 && !HONOR_NANS (mode)
3092 && !HONOR_SIGNED_ZEROS (mode)
3093 && ! side_effects_p (op1))
3094 return op0;
3095 /* x/1.0 is x. */
3096 if (trueop1 == CONST1_RTX (mode)
3097 && !HONOR_SNANS (mode))
3098 return op0;
3100 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3101 && trueop1 != CONST0_RTX (mode))
3103 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3105 /* x/-1.0 is -x. */
3106 if (real_equal (d1, &dconstm1)
3107 && !HONOR_SNANS (mode))
3108 return simplify_gen_unary (NEG, mode, op0, mode);
3110 /* Change FP division by a constant into multiplication.
3111 Only do this with -freciprocal-math. */
3112 if (flag_reciprocal_math
3113 && !real_equal (d1, &dconst0))
3115 REAL_VALUE_TYPE d;
3116 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3117 tem = const_double_from_real_value (d, mode);
3118 return simplify_gen_binary (MULT, mode, op0, tem);
3122 else if (SCALAR_INT_MODE_P (mode))
3124 /* 0/x is 0 (or x&0 if x has side-effects). */
3125 if (trueop0 == CONST0_RTX (mode)
3126 && !cfun->can_throw_non_call_exceptions)
3128 if (side_effects_p (op1))
3129 return simplify_gen_binary (AND, mode, op1, trueop0);
3130 return trueop0;
3132 /* x/1 is x. */
3133 if (trueop1 == CONST1_RTX (mode))
3135 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 if (tem)
3137 return tem;
3139 /* x/-1 is -x. */
3140 if (trueop1 == constm1_rtx)
3142 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3143 if (x)
3144 return simplify_gen_unary (NEG, mode, x, mode);
3147 break;
3149 case UMOD:
3150 /* 0%x is 0 (or x&0 if x has side-effects). */
3151 if (trueop0 == CONST0_RTX (mode))
3153 if (side_effects_p (op1))
3154 return simplify_gen_binary (AND, mode, op1, trueop0);
3155 return trueop0;
3157 /* x%1 is 0 (or x&0 if x has side-effects). */
3158 if (trueop1 == CONST1_RTX (mode))
3160 if (side_effects_p (op0))
3161 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3162 return CONST0_RTX (mode);
3164 /* Implement modulus by power of two as AND. */
3165 if (CONST_INT_P (trueop1)
3166 && exact_log2 (UINTVAL (trueop1)) > 0)
3167 return simplify_gen_binary (AND, mode, op0,
3168 gen_int_mode (INTVAL (op1) - 1, mode));
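 /* E.g. (umod:SI x (const_int 16)) becomes (and:SI x (const_int 15)). */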
3169 break;
3171 case MOD:
3172 /* 0%x is 0 (or x&0 if x has side-effects). */
3173 if (trueop0 == CONST0_RTX (mode))
3175 if (side_effects_p (op1))
3176 return simplify_gen_binary (AND, mode, op1, trueop0);
3177 return trueop0;
3179 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3180 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3182 if (side_effects_p (op0))
3183 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3184 return CONST0_RTX (mode);
3186 break;
3188 case ROTATERT:
3189 case ROTATE:
3190 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3191 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3192 bitsize - 1, use the other direction of rotate, with an amount of
3193 1 .. bitsize / 2 - 1 instead. */
3194 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3195 if (CONST_INT_P (trueop1)
3196 && IN_RANGE (INTVAL (trueop1),
3197 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3198 GET_MODE_PRECISION (mode) - 1))
3199 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3200 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3201 - INTVAL (trueop1)));
3202 #endif
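 /* Under the rule above, assuming a 32-bit SImode,
    (rotatert:SI x (const_int 27)) is canonicalized to
    (rotate:SI x (const_int 5)), and (rotate:SI x (const_int 20)) to
    (rotatert:SI x (const_int 12)). */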
3203 /* FALLTHRU */
3204 case ASHIFTRT:
3205 if (trueop1 == CONST0_RTX (mode))
3206 return op0;
3207 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3208 return op0;
3209 /* Rotating ~0 always results in ~0. */
3210 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3211 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3212 && ! side_effects_p (op1))
3213 return op0;
3214 /* Given:
3215 scalar modes M1, M2
3216 scalar constants c1, c2
3217 size (M2) > size (M1)
3218 c1 == size (M2) - size (M1)
3219 optimize:
3220 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3221 <low_part>)
3222 (const_int <c2>)) to:
3224 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3225 <low_part>). */
3226 if (code == ASHIFTRT
3227 && !VECTOR_MODE_P (mode)
3228 && SUBREG_P (op0)
3229 && CONST_INT_P (op1)
3230 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3231 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3232 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3233 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3234 > GET_MODE_BITSIZE (mode))
3235 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3236 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3237 - GET_MODE_BITSIZE (mode)))
3238 && subreg_lowpart_p (op0))
3240 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3241 + INTVAL (op1));
3242 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3243 tmp = simplify_gen_binary (ASHIFTRT,
3244 GET_MODE (SUBREG_REG (op0)),
3245 XEXP (SUBREG_REG (op0), 0),
3246 tmp);
3247 return lowpart_subreg (mode, tmp, inner_mode);
3249 canonicalize_shift:
3250 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3252 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3253 if (val != INTVAL (op1))
3254 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3256 break;
3258 case ASHIFT:
3259 case SS_ASHIFT:
3260 case US_ASHIFT:
3261 if (trueop1 == CONST0_RTX (mode))
3262 return op0;
3263 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3264 return op0;
3265 goto canonicalize_shift;
3267 case LSHIFTRT:
3268 if (trueop1 == CONST0_RTX (mode))
3269 return op0;
3270 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3271 return op0;
3272 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3273 if (GET_CODE (op0) == CLZ
3274 && CONST_INT_P (trueop1)
3275 && STORE_FLAG_VALUE == 1
3276 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3278 machine_mode imode = GET_MODE (XEXP (op0, 0));
3279 unsigned HOST_WIDE_INT zero_val = 0;
3281 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3282 && zero_val == GET_MODE_PRECISION (imode)
3283 && INTVAL (trueop1) == exact_log2 (zero_val))
3284 return simplify_gen_relational (EQ, mode, imode,
3285 XEXP (op0, 0), const0_rtx);
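 /* E.g. if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, then
    (lshiftrt:SI (clz:SI x) (const_int 5)) becomes
    (eq:SI x (const_int 0)): the CLZ result is 32 (== 1 << 5) only for
    x == 0 and is less than 32 otherwise, so the shift produces 1
    exactly when x is zero. */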
3287 goto canonicalize_shift;
3289 case SMIN:
3290 if (width <= HOST_BITS_PER_WIDE_INT
3291 && mode_signbit_p (mode, trueop1)
3292 && ! side_effects_p (op0))
3293 return op1;
3294 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3295 return op0;
3296 tem = simplify_associative_operation (code, mode, op0, op1);
3297 if (tem)
3298 return tem;
3299 break;
3301 case SMAX:
3302 if (width <= HOST_BITS_PER_WIDE_INT
3303 && CONST_INT_P (trueop1)
3304 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3305 && ! side_effects_p (op0))
3306 return op1;
3307 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3308 return op0;
3309 tem = simplify_associative_operation (code, mode, op0, op1);
3310 if (tem)
3311 return tem;
3312 break;
3314 case UMIN:
3315 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3316 return op1;
3317 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3318 return op0;
3319 tem = simplify_associative_operation (code, mode, op0, op1);
3320 if (tem)
3321 return tem;
3322 break;
3324 case UMAX:
3325 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3326 return op1;
3327 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3328 return op0;
3329 tem = simplify_associative_operation (code, mode, op0, op1);
3330 if (tem)
3331 return tem;
3332 break;
3334 case SS_PLUS:
3335 case US_PLUS:
3336 case SS_MINUS:
3337 case US_MINUS:
3338 case SS_MULT:
3339 case US_MULT:
3340 case SS_DIV:
3341 case US_DIV:
3342 /* ??? There are simplifications that can be done. */
3343 return 0;
3345 case VEC_SELECT:
3346 if (!VECTOR_MODE_P (mode))
3348 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3349 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3350 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3351 gcc_assert (XVECLEN (trueop1, 0) == 1);
3352 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3354 if (GET_CODE (trueop0) == CONST_VECTOR)
3355 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3356 (trueop1, 0, 0)));
3358 /* Extract a scalar element from a nested VEC_SELECT expression
3359 (with an optional nested VEC_CONCAT expression). Some targets
3360 (i386) extract a scalar element from a vector using a chain of
3361 nested VEC_SELECT expressions. When the input operand is a memory
3362 operand, this operation can be simplified to a simple scalar
3363 load from an offset memory address. */
3364 if (GET_CODE (trueop0) == VEC_SELECT)
3366 rtx op0 = XEXP (trueop0, 0);
3367 rtx op1 = XEXP (trueop0, 1);
3369 machine_mode opmode = GET_MODE (op0);
3370 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3371 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3373 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3374 int elem;
3376 rtvec vec;
3377 rtx tmp_op, tmp;
3379 gcc_assert (GET_CODE (op1) == PARALLEL);
3380 gcc_assert (i < n_elts);
3382 /* Select the element pointed to by the nested selector. */
3383 elem = INTVAL (XVECEXP (op1, 0, i));
3385 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3386 if (GET_CODE (op0) == VEC_CONCAT)
3388 rtx op00 = XEXP (op0, 0);
3389 rtx op01 = XEXP (op0, 1);
3391 machine_mode mode00, mode01;
3392 int n_elts00, n_elts01;
3394 mode00 = GET_MODE (op00);
3395 mode01 = GET_MODE (op01);
3397 /* Find out the number of elements of each operand. */
3398 if (VECTOR_MODE_P (mode00))
3400 elt_size = GET_MODE_UNIT_SIZE (mode00);
3401 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3403 else
3404 n_elts00 = 1;
3406 if (VECTOR_MODE_P (mode01))
3408 elt_size = GET_MODE_UNIT_SIZE (mode01);
3409 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3411 else
3412 n_elts01 = 1;
3414 gcc_assert (n_elts == n_elts00 + n_elts01);
3416 /* Select correct operand of VEC_CONCAT
3417 and adjust selector. */
3418 if (elem < n_elts01)
3419 tmp_op = op00;
3420 else
3422 tmp_op = op01;
3423 elem -= n_elts00;
3426 else
3427 tmp_op = op0;
3429 vec = rtvec_alloc (1);
3430 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3432 tmp = gen_rtx_fmt_ee (code, mode,
3433 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3434 return tmp;
3436 if (GET_CODE (trueop0) == VEC_DUPLICATE
3437 && GET_MODE (XEXP (trueop0, 0)) == mode)
3438 return XEXP (trueop0, 0);
3440 else
3442 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3443 gcc_assert (GET_MODE_INNER (mode)
3444 == GET_MODE_INNER (GET_MODE (trueop0)));
3445 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3447 if (GET_CODE (trueop0) == CONST_VECTOR)
3449 int elt_size = GET_MODE_UNIT_SIZE (mode);
3450 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3451 rtvec v = rtvec_alloc (n_elts);
3452 unsigned int i;
3454 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3455 for (i = 0; i < n_elts; i++)
3457 rtx x = XVECEXP (trueop1, 0, i);
3459 gcc_assert (CONST_INT_P (x));
3460 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3461 INTVAL (x));
3464 return gen_rtx_CONST_VECTOR (mode, v);
3467 /* Recognize the identity. */
3468 if (GET_MODE (trueop0) == mode)
3470 bool maybe_ident = true;
3471 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3473 rtx j = XVECEXP (trueop1, 0, i);
3474 if (!CONST_INT_P (j) || INTVAL (j) != i)
3476 maybe_ident = false;
3477 break;
3480 if (maybe_ident)
3481 return trueop0;
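 /* E.g. (vec_select:V4SI x (parallel [(const_int 0) (const_int 1)
    (const_int 2) (const_int 3)])) with x of mode V4SI is just x. */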
3484 /* If we build {a,b} then permute it, build the result directly. */
3485 if (XVECLEN (trueop1, 0) == 2
3486 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3487 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3488 && GET_CODE (trueop0) == VEC_CONCAT
3489 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3490 && GET_MODE (XEXP (trueop0, 0)) == mode
3491 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3492 && GET_MODE (XEXP (trueop0, 1)) == mode)
3494 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3495 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3496 rtx subop0, subop1;
3498 gcc_assert (i0 < 4 && i1 < 4);
3499 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3500 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3502 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3505 if (XVECLEN (trueop1, 0) == 2
3506 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3507 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3508 && GET_CODE (trueop0) == VEC_CONCAT
3509 && GET_MODE (trueop0) == mode)
3511 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3512 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3513 rtx subop0, subop1;
3515 gcc_assert (i0 < 2 && i1 < 2);
3516 subop0 = XEXP (trueop0, i0);
3517 subop1 = XEXP (trueop0, i1);
3519 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3522 /* If we select one half of a vec_concat, return that. */
3523 if (GET_CODE (trueop0) == VEC_CONCAT
3524 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3526 rtx subop0 = XEXP (trueop0, 0);
3527 rtx subop1 = XEXP (trueop0, 1);
3528 machine_mode mode0 = GET_MODE (subop0);
3529 machine_mode mode1 = GET_MODE (subop1);
3530 int li = GET_MODE_UNIT_SIZE (mode0);
3531 int l0 = GET_MODE_SIZE (mode0) / li;
3532 int l1 = GET_MODE_SIZE (mode1) / li;
3533 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3534 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3536 bool success = true;
3537 for (int i = 1; i < l0; ++i)
3539 rtx j = XVECEXP (trueop1, 0, i);
3540 if (!CONST_INT_P (j) || INTVAL (j) != i)
3542 success = false;
3543 break;
3546 if (success)
3547 return subop0;
3549 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3551 bool success = true;
3552 for (int i = 1; i < l1; ++i)
3554 rtx j = XVECEXP (trueop1, 0, i);
3555 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3557 success = false;
3558 break;
3561 if (success)
3562 return subop1;
3567 if (XVECLEN (trueop1, 0) == 1
3568 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3569 && GET_CODE (trueop0) == VEC_CONCAT)
3571 rtx vec = trueop0;
3572 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3574 /* Try to find the element in the VEC_CONCAT. */
3575 while (GET_MODE (vec) != mode
3576 && GET_CODE (vec) == VEC_CONCAT)
3578 HOST_WIDE_INT vec_size;
3580 if (CONST_INT_P (XEXP (vec, 0)))
3582 /* vec_concat of two const_ints doesn't make sense with
3583 respect to modes. */
3584 if (CONST_INT_P (XEXP (vec, 1)))
3585 return 0;
3587 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3588 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3590 else
3591 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3593 if (offset < vec_size)
3594 vec = XEXP (vec, 0);
3595 else
3597 offset -= vec_size;
3598 vec = XEXP (vec, 1);
3600 vec = avoid_constant_pool_reference (vec);
3603 if (GET_MODE (vec) == mode)
3604 return vec;
3607 /* If we select elements in a vec_merge that all come from the same
3608 operand, select from that operand directly. */
3609 if (GET_CODE (op0) == VEC_MERGE)
3611 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3612 if (CONST_INT_P (trueop02))
3614 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3615 bool all_operand0 = true;
3616 bool all_operand1 = true;
3617 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3619 rtx j = XVECEXP (trueop1, 0, i);
3620 if (sel & (1 << UINTVAL (j)))
3621 all_operand1 = false;
3622 else
3623 all_operand0 = false;
3625 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3626 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3627 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3628 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3632 /* If we have two nested selects that are inverses of each
3633 other, replace them with the source operand. */
3634 if (GET_CODE (trueop0) == VEC_SELECT
3635 && GET_MODE (XEXP (trueop0, 0)) == mode)
3637 rtx op0_subop1 = XEXP (trueop0, 1);
3638 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3639 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3641 /* Apply the outer ordering vector to the inner one. (The inner
3642 ordering vector is expressly permitted to be of a different
3643 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3644 then the two VEC_SELECTs cancel. */
3645 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3647 rtx x = XVECEXP (trueop1, 0, i);
3648 if (!CONST_INT_P (x))
3649 return 0;
3650 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3651 if (!CONST_INT_P (y) || i != INTVAL (y))
3652 return 0;
3654 return XEXP (trueop0, 0);
3657 return 0;
3658 case VEC_CONCAT:
3660 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3661 ? GET_MODE (trueop0)
3662 : GET_MODE_INNER (mode));
3663 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3664 ? GET_MODE (trueop1)
3665 : GET_MODE_INNER (mode));
3667 gcc_assert (VECTOR_MODE_P (mode));
3668 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3669 == GET_MODE_SIZE (mode));
3671 if (VECTOR_MODE_P (op0_mode))
3672 gcc_assert (GET_MODE_INNER (mode)
3673 == GET_MODE_INNER (op0_mode));
3674 else
3675 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3677 if (VECTOR_MODE_P (op1_mode))
3678 gcc_assert (GET_MODE_INNER (mode)
3679 == GET_MODE_INNER (op1_mode));
3680 else
3681 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3683 if ((GET_CODE (trueop0) == CONST_VECTOR
3684 || CONST_SCALAR_INT_P (trueop0)
3685 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3686 && (GET_CODE (trueop1) == CONST_VECTOR
3687 || CONST_SCALAR_INT_P (trueop1)
3688 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3690 int elt_size = GET_MODE_UNIT_SIZE (mode);
3691 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3692 rtvec v = rtvec_alloc (n_elts);
3693 unsigned int i;
3694 unsigned in_n_elts = 1;
3696 if (VECTOR_MODE_P (op0_mode))
3697 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3698 for (i = 0; i < n_elts; i++)
3700 if (i < in_n_elts)
3702 if (!VECTOR_MODE_P (op0_mode))
3703 RTVEC_ELT (v, i) = trueop0;
3704 else
3705 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3707 else
3709 if (!VECTOR_MODE_P (op1_mode))
3710 RTVEC_ELT (v, i) = trueop1;
3711 else
3712 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3713 i - in_n_elts);
3717 return gen_rtx_CONST_VECTOR (mode, v);
3720 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3721 Restrict the transformation to avoid generating a VEC_SELECT with a
3722 mode unrelated to its operand. */
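/* E.g. (vec_concat (vec_select X (parallel [0])) (vec_select X (parallel [3])))
   can become (vec_select X (parallel [0 3])) when X already has mode MODE.  */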
3723 if (GET_CODE (trueop0) == VEC_SELECT
3724 && GET_CODE (trueop1) == VEC_SELECT
3725 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3726 && GET_MODE (XEXP (trueop0, 0)) == mode)
3728 rtx par0 = XEXP (trueop0, 1);
3729 rtx par1 = XEXP (trueop1, 1);
3730 int len0 = XVECLEN (par0, 0);
3731 int len1 = XVECLEN (par1, 0);
3732 rtvec vec = rtvec_alloc (len0 + len1);
3733 for (int i = 0; i < len0; i++)
3734 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3735 for (int i = 0; i < len1; i++)
3736 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3737 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3738 gen_rtx_PARALLEL (VOIDmode, vec));
3741 return 0;
3743 default:
3744 gcc_unreachable ();
3747 return 0;
3751 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3752 rtx op0, rtx op1)
3754 unsigned int width = GET_MODE_PRECISION (mode);
3756 if (VECTOR_MODE_P (mode)
3757 && code != VEC_CONCAT
3758 && GET_CODE (op0) == CONST_VECTOR
3759 && GET_CODE (op1) == CONST_VECTOR)
3761 unsigned n_elts = GET_MODE_NUNITS (mode);
3762 machine_mode op0mode = GET_MODE (op0);
3763 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3764 machine_mode op1mode = GET_MODE (op1);
3765 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3766 rtvec v = rtvec_alloc (n_elts);
3767 unsigned int i;
3769 gcc_assert (op0_n_elts == n_elts);
3770 gcc_assert (op1_n_elts == n_elts);
3771 for (i = 0; i < n_elts; i++)
3773 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3774 CONST_VECTOR_ELT (op0, i),
3775 CONST_VECTOR_ELT (op1, i));
3776 if (!x)
3777 return 0;
3778 RTVEC_ELT (v, i) = x;
3781 return gen_rtx_CONST_VECTOR (mode, v);
3784 if (VECTOR_MODE_P (mode)
3785 && code == VEC_CONCAT
3786 && (CONST_SCALAR_INT_P (op0)
3787 || GET_CODE (op0) == CONST_FIXED
3788 || CONST_DOUBLE_AS_FLOAT_P (op0))
3789 && (CONST_SCALAR_INT_P (op1)
3790 || CONST_DOUBLE_AS_FLOAT_P (op1)
3791 || GET_CODE (op1) == CONST_FIXED))
3793 unsigned n_elts = GET_MODE_NUNITS (mode);
3794 rtvec v = rtvec_alloc (n_elts);
3796 gcc_assert (n_elts >= 2);
3797 if (n_elts == 2)
3799 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3800 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3802 RTVEC_ELT (v, 0) = op0;
3803 RTVEC_ELT (v, 1) = op1;
3805 else
3807 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3808 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3809 unsigned i;
3811 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3812 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3813 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3815 for (i = 0; i < op0_n_elts; ++i)
3816 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3817 for (i = 0; i < op1_n_elts; ++i)
3818 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3821 return gen_rtx_CONST_VECTOR (mode, v);
3824 if (SCALAR_FLOAT_MODE_P (mode)
3825 && CONST_DOUBLE_AS_FLOAT_P (op0)
3826 && CONST_DOUBLE_AS_FLOAT_P (op1)
3827 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3829 if (code == AND
3830 || code == IOR
3831 || code == XOR)
3833 long tmp0[4];
3834 long tmp1[4];
3835 REAL_VALUE_TYPE r;
3836 int i;
3838 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3839 GET_MODE (op0));
3840 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3841 GET_MODE (op1));
3842 for (i = 0; i < 4; i++)
3844 switch (code)
3846 case AND:
3847 tmp0[i] &= tmp1[i];
3848 break;
3849 case IOR:
3850 tmp0[i] |= tmp1[i];
3851 break;
3852 case XOR:
3853 tmp0[i] ^= tmp1[i];
3854 break;
3855 default:
3856 gcc_unreachable ();
3859 real_from_target (&r, tmp0, mode);
3860 return const_double_from_real_value (r, mode);
3862 else
3864 REAL_VALUE_TYPE f0, f1, value, result;
3865 bool inexact;
3867 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3868 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3870 if (HONOR_SNANS (mode)
3871 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3872 return 0;
3874 if (code == DIV
3875 && real_equal (&f1, &dconst0)
3876 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3877 return 0;
3879 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3880 && flag_trapping_math
3881 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3883 int s0 = REAL_VALUE_NEGATIVE (f0);
3884 int s1 = REAL_VALUE_NEGATIVE (f1);
3886 switch (code)
3888 case PLUS:
3889 /* Inf + -Inf = NaN plus exception. */
3890 if (s0 != s1)
3891 return 0;
3892 break;
3893 case MINUS:
3894 /* Inf - Inf = NaN plus exception. */
3895 if (s0 == s1)
3896 return 0;
3897 break;
3898 case DIV:
3899 /* Inf / Inf = NaN plus exception. */
3900 return 0;
3901 default:
3902 break;
3906 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3907 && flag_trapping_math
3908 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3909 || (REAL_VALUE_ISINF (f1)
3910 && real_equal (&f0, &dconst0))))
3911 /* Inf * 0 = NaN plus exception. */
3912 return 0;
3914 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3915 &f0, &f1);
3916 real_convert (&result, mode, &value);
3918 /* Don't constant fold this floating point operation if
3919 the result has overflowed and flag_trapping_math is set. */
3921 if (flag_trapping_math
3922 && MODE_HAS_INFINITIES (mode)
3923 && REAL_VALUE_ISINF (result)
3924 && !REAL_VALUE_ISINF (f0)
3925 && !REAL_VALUE_ISINF (f1))
3926 /* Overflow plus exception. */
3927 return 0;
3929 /* Don't constant fold this floating point operation if the
3930 result may depend upon the run-time rounding mode and
3931 flag_rounding_math is set, or if GCC's software emulation
3932 is unable to accurately represent the result. */
3934 if ((flag_rounding_math
3935 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3936 && (inexact || !real_identical (&result, &value)))
3937 return NULL_RTX;
3939 return const_double_from_real_value (result, mode);
3943 /* We can fold some multi-word operations. */
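/* A minimal sketch of this path: simplify_const_binary_operation
   (PLUS, DImode, GEN_INT (5), constm1_rtx) computes the sum with wi::add
   and hands it back as (const_int 4) via immed_wide_int_const.  */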
3944 if ((GET_MODE_CLASS (mode) == MODE_INT
3945 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3946 && CONST_SCALAR_INT_P (op0)
3947 && CONST_SCALAR_INT_P (op1))
3949 wide_int result;
3950 bool overflow;
3951 rtx_mode_t pop0 = std::make_pair (op0, mode);
3952 rtx_mode_t pop1 = std::make_pair (op1, mode);
3954 #if TARGET_SUPPORTS_WIDE_INT == 0
3955 /* This assert keeps the simplification from producing a result
3956 that cannot be represented in a CONST_DOUBLE but a lot of
3957 upstream callers expect that this function never fails to
3958 simplify something and so if you added this to the test
3959 above the code would die later anyway. If this assert
3960 happens, you just need to make the port support wide int. */
3961 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3962 #endif
3963 switch (code)
3965 case MINUS:
3966 result = wi::sub (pop0, pop1);
3967 break;
3969 case PLUS:
3970 result = wi::add (pop0, pop1);
3971 break;
3973 case MULT:
3974 result = wi::mul (pop0, pop1);
3975 break;
3977 case DIV:
3978 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3979 if (overflow)
3980 return NULL_RTX;
3981 break;
3983 case MOD:
3984 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3985 if (overflow)
3986 return NULL_RTX;
3987 break;
3989 case UDIV:
3990 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3991 if (overflow)
3992 return NULL_RTX;
3993 break;
3995 case UMOD:
3996 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3997 if (overflow)
3998 return NULL_RTX;
3999 break;
4001 case AND:
4002 result = wi::bit_and (pop0, pop1);
4003 break;
4005 case IOR:
4006 result = wi::bit_or (pop0, pop1);
4007 break;
4009 case XOR:
4010 result = wi::bit_xor (pop0, pop1);
4011 break;
4013 case SMIN:
4014 result = wi::smin (pop0, pop1);
4015 break;
4017 case SMAX:
4018 result = wi::smax (pop0, pop1);
4019 break;
4021 case UMIN:
4022 result = wi::umin (pop0, pop1);
4023 break;
4025 case UMAX:
4026 result = wi::umax (pop0, pop1);
4027 break;
4029 case LSHIFTRT:
4030 case ASHIFTRT:
4031 case ASHIFT:
4033 wide_int wop1 = pop1;
4034 if (SHIFT_COUNT_TRUNCATED)
4035 wop1 = wi::umod_trunc (wop1, width);
4036 else if (wi::geu_p (wop1, width))
4037 return NULL_RTX;
4039 switch (code)
4041 case LSHIFTRT:
4042 result = wi::lrshift (pop0, wop1);
4043 break;
4045 case ASHIFTRT:
4046 result = wi::arshift (pop0, wop1);
4047 break;
4049 case ASHIFT:
4050 result = wi::lshift (pop0, wop1);
4051 break;
4053 default:
4054 gcc_unreachable ();
4056 break;
4058 case ROTATE:
4059 case ROTATERT:
4061 if (wi::neg_p (pop1))
4062 return NULL_RTX;
4064 switch (code)
4066 case ROTATE:
4067 result = wi::lrotate (pop0, pop1);
4068 break;
4070 case ROTATERT:
4071 result = wi::rrotate (pop0, pop1);
4072 break;
4074 default:
4075 gcc_unreachable ();
4077 break;
4079 default:
4080 return NULL_RTX;
4082 return immed_wide_int_const (result, mode);
4085 return NULL_RTX;
4090 /* Return a positive integer if X should sort after Y. The value
4091 returned is 1 if and only if X and Y are both regs. */
4093 static int
4094 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4096 int result;
4098 result = (commutative_operand_precedence (y)
4099 - commutative_operand_precedence (x));
4100 if (result)
4101 return result + result;
4103 /* Group together equal REGs to do more simplification. */
4104 if (REG_P (x) && REG_P (y))
4105 return REGNO (x) > REGNO (y);
4107 return 0;
4110 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4111 operands may be another PLUS or MINUS.
4113 Rather than test for specific cases, we do this by a brute-force method
4114 and do all possible simplifications until no more changes occur. Then
4115 we rebuild the operation.
4117 May return NULL_RTX when no changes were made. */
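/* A rough illustration: (minus (plus A B) (plus B C)) is first flattened
   into the ops array as { +A, +B, -B, -C }; the pairwise combination loop
   then cancels +B against -B, and the result is rebuilt as (minus A C).  */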
4119 static rtx
4120 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4121 rtx op1)
4123 struct simplify_plus_minus_op_data
4125 rtx op;
4126 short neg;
4127 } ops[16];
4128 rtx result, tem;
4129 int n_ops = 2;
4130 int changed, n_constants, canonicalized = 0;
4131 int i, j;
4133 memset (ops, 0, sizeof ops);
4135 /* Set up the two operands and then expand them until nothing has been
4136 changed. If we run out of room in our array, give up; this should
4137 almost never happen. */
4139 ops[0].op = op0;
4140 ops[0].neg = 0;
4141 ops[1].op = op1;
4142 ops[1].neg = (code == MINUS);
4146 changed = 0;
4147 n_constants = 0;
4149 for (i = 0; i < n_ops; i++)
4151 rtx this_op = ops[i].op;
4152 int this_neg = ops[i].neg;
4153 enum rtx_code this_code = GET_CODE (this_op);
4155 switch (this_code)
4157 case PLUS:
4158 case MINUS:
4159 if (n_ops == ARRAY_SIZE (ops))
4160 return NULL_RTX;
4162 ops[n_ops].op = XEXP (this_op, 1);
4163 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4164 n_ops++;
4166 ops[i].op = XEXP (this_op, 0);
4167 changed = 1;
4168 /* If this operand was negated then we will potentially
4169 canonicalize the expression. Similarly if we don't
4170 place the operands adjacent we're re-ordering the
4171 expression and thus might be performing a
4172 canonicalization. Ignore register re-ordering.
4173 ??? It might be better to shuffle the ops array here,
4174 but then (plus (plus (A, B), plus (C, D))) wouldn't
4175 be seen as non-canonical. */
4176 if (this_neg
4177 || (i != n_ops - 2
4178 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4179 canonicalized = 1;
4180 break;
4182 case NEG:
4183 ops[i].op = XEXP (this_op, 0);
4184 ops[i].neg = ! this_neg;
4185 changed = 1;
4186 canonicalized = 1;
4187 break;
4189 case CONST:
4190 if (n_ops != ARRAY_SIZE (ops)
4191 && GET_CODE (XEXP (this_op, 0)) == PLUS
4192 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4193 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4195 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4196 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4197 ops[n_ops].neg = this_neg;
4198 n_ops++;
4199 changed = 1;
4200 canonicalized = 1;
4202 break;
4204 case NOT:
4205 /* ~a -> (-a - 1) */
4206 if (n_ops != ARRAY_SIZE (ops))
4208 ops[n_ops].op = CONSTM1_RTX (mode);
4209 ops[n_ops++].neg = this_neg;
4210 ops[i].op = XEXP (this_op, 0);
4211 ops[i].neg = !this_neg;
4212 changed = 1;
4213 canonicalized = 1;
4215 break;
4217 case CONST_INT:
4218 n_constants++;
4219 if (this_neg)
4221 ops[i].op = neg_const_int (mode, this_op);
4222 ops[i].neg = 0;
4223 changed = 1;
4224 canonicalized = 1;
4226 break;
4228 default:
4229 break;
4233 while (changed);
4235 if (n_constants > 1)
4236 canonicalized = 1;
4238 gcc_assert (n_ops >= 2);
4240 /* If we only have two operands, we can avoid the loops. */
4241 if (n_ops == 2)
4243 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4244 rtx lhs, rhs;
4246 /* Get the two operands. Be careful with the order, especially for
4247 the cases where code == MINUS. */
4248 if (ops[0].neg && ops[1].neg)
4250 lhs = gen_rtx_NEG (mode, ops[0].op);
4251 rhs = ops[1].op;
4253 else if (ops[0].neg)
4255 lhs = ops[1].op;
4256 rhs = ops[0].op;
4258 else
4260 lhs = ops[0].op;
4261 rhs = ops[1].op;
4264 return simplify_const_binary_operation (code, mode, lhs, rhs);
4267 /* Now simplify each pair of operands until nothing changes. */
4268 while (1)
4270 /* Insertion sort is good enough for a small array. */
4271 for (i = 1; i < n_ops; i++)
4273 struct simplify_plus_minus_op_data save;
4274 int cmp;
4276 j = i - 1;
4277 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4278 if (cmp <= 0)
4279 continue;
4280 /* Just swapping registers doesn't count as canonicalization. */
4281 if (cmp != 1)
4282 canonicalized = 1;
4284 save = ops[i];
4286 ops[j + 1] = ops[j];
4287 while (j--
4288 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4289 ops[j + 1] = save;
4292 changed = 0;
4293 for (i = n_ops - 1; i > 0; i--)
4294 for (j = i - 1; j >= 0; j--)
4296 rtx lhs = ops[j].op, rhs = ops[i].op;
4297 int lneg = ops[j].neg, rneg = ops[i].neg;
4299 if (lhs != 0 && rhs != 0)
4301 enum rtx_code ncode = PLUS;
4303 if (lneg != rneg)
4305 ncode = MINUS;
4306 if (lneg)
4307 std::swap (lhs, rhs);
4309 else if (swap_commutative_operands_p (lhs, rhs))
4310 std::swap (lhs, rhs);
4312 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4313 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4315 rtx tem_lhs, tem_rhs;
4317 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4318 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4319 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4320 tem_rhs);
4322 if (tem && !CONSTANT_P (tem))
4323 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4325 else
4326 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4328 if (tem)
4330 /* Reject "simplifications" that just wrap the two
4331 arguments in a CONST. Failure to do so can result
4332 in infinite recursion with simplify_binary_operation
4333 when it calls us to simplify CONST operations.
4334 Also, if we find such a simplification, don't try
4335 any more combinations with this rhs: We must have
4336 something like symbol+offset, i.e. one of the
4337 trivial CONST expressions we handle later. */
4338 if (GET_CODE (tem) == CONST
4339 && GET_CODE (XEXP (tem, 0)) == ncode
4340 && XEXP (XEXP (tem, 0), 0) == lhs
4341 && XEXP (XEXP (tem, 0), 1) == rhs)
4342 break;
4343 lneg &= rneg;
4344 if (GET_CODE (tem) == NEG)
4345 tem = XEXP (tem, 0), lneg = !lneg;
4346 if (CONST_INT_P (tem) && lneg)
4347 tem = neg_const_int (mode, tem), lneg = 0;
4349 ops[i].op = tem;
4350 ops[i].neg = lneg;
4351 ops[j].op = NULL_RTX;
4352 changed = 1;
4353 canonicalized = 1;
4358 if (!changed)
4359 break;
4361 /* Pack all the operands to the lower-numbered entries. */
4362 for (i = 0, j = 0; j < n_ops; j++)
4363 if (ops[j].op)
4365 ops[i] = ops[j];
4366 i++;
4368 n_ops = i;
4371 /* If nothing changed, fail. */
4372 if (!canonicalized)
4373 return NULL_RTX;
4375 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4376 if (n_ops == 2
4377 && CONST_INT_P (ops[1].op)
4378 && CONSTANT_P (ops[0].op)
4379 && ops[0].neg)
4380 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4382 /* We suppressed creation of trivial CONST expressions in the
4383 combination loop to avoid recursion. Create one manually now.
4384 The combination loop should have ensured that there is exactly
4385 one CONST_INT, and the sort will have ensured that it is last
4386 in the array and that any other constant will be next-to-last. */
4388 if (n_ops > 1
4389 && CONST_INT_P (ops[n_ops - 1].op)
4390 && CONSTANT_P (ops[n_ops - 2].op))
4392 rtx value = ops[n_ops - 1].op;
4393 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4394 value = neg_const_int (mode, value);
4395 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4396 INTVAL (value));
4397 n_ops--;
4400 /* Put a non-negated operand first, if possible. */
4402 for (i = 0; i < n_ops && ops[i].neg; i++)
4403 continue;
4404 if (i == n_ops)
4405 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4406 else if (i != 0)
4408 tem = ops[0].op;
4409 ops[0] = ops[i];
4410 ops[i].op = tem;
4411 ops[i].neg = 1;
4414 /* Now make the result by performing the requested operations. */
4415 result = ops[0].op;
4416 for (i = 1; i < n_ops; i++)
4417 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4418 mode, result, ops[i].op);
4420 return result;
4423 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4424 static bool
4425 plus_minus_operand_p (const_rtx x)
4427 return GET_CODE (x) == PLUS
4428 || GET_CODE (x) == MINUS
4429 || (GET_CODE (x) == CONST
4430 && GET_CODE (XEXP (x, 0)) == PLUS
4431 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4432 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4435 /* Like simplify_binary_operation except used for relational operators.
4436 MODE is the mode of the result. If MODE is VOIDmode, the two operands
4437 must not both be VOIDmode as well.
4439 CMP_MODE specifies the mode in which the comparison is done, so it is
4440 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4441 the operands or, if both are VOIDmode, the operands are compared in
4442 "infinite precision". */
4444 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4445 machine_mode cmp_mode, rtx op0, rtx op1)
4447 rtx tem, trueop0, trueop1;
4449 if (cmp_mode == VOIDmode)
4450 cmp_mode = GET_MODE (op0);
4451 if (cmp_mode == VOIDmode)
4452 cmp_mode = GET_MODE (op1);
4454 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4455 if (tem)
4457 if (SCALAR_FLOAT_MODE_P (mode))
4459 if (tem == const0_rtx)
4460 return CONST0_RTX (mode);
4461 #ifdef FLOAT_STORE_FLAG_VALUE
4463 REAL_VALUE_TYPE val;
4464 val = FLOAT_STORE_FLAG_VALUE (mode);
4465 return const_double_from_real_value (val, mode);
4467 #else
4468 return NULL_RTX;
4469 #endif
4471 if (VECTOR_MODE_P (mode))
4473 if (tem == const0_rtx)
4474 return CONST0_RTX (mode);
4475 #ifdef VECTOR_STORE_FLAG_VALUE
4477 int i, units;
4478 rtvec v;
4480 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4481 if (val == NULL_RTX)
4482 return NULL_RTX;
4483 if (val == const1_rtx)
4484 return CONST1_RTX (mode);
4486 units = GET_MODE_NUNITS (mode);
4487 v = rtvec_alloc (units);
4488 for (i = 0; i < units; i++)
4489 RTVEC_ELT (v, i) = val;
4490 return gen_rtx_raw_CONST_VECTOR (mode, v);
4492 #else
4493 return NULL_RTX;
4494 #endif
4497 return tem;
4500 /* For the following tests, ensure const0_rtx is op1. */
4501 if (swap_commutative_operands_p (op0, op1)
4502 || (op0 == const0_rtx && op1 != const0_rtx))
4503 std::swap (op0, op1), code = swap_condition (code);
4505 /* If op0 is a compare, extract the comparison arguments from it. */
4506 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4507 return simplify_gen_relational (code, mode, VOIDmode,
4508 XEXP (op0, 0), XEXP (op0, 1));
4510 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4511 || CC0_P (op0))
4512 return NULL_RTX;
4514 trueop0 = avoid_constant_pool_reference (op0);
4515 trueop1 = avoid_constant_pool_reference (op1);
4516 return simplify_relational_operation_1 (code, mode, cmp_mode,
4517 trueop0, trueop1);
4520 /* This part of simplify_relational_operation is only used when CMP_MODE
4521 is not in class MODE_CC (i.e. it is a real comparison).
4523 MODE is the mode of the result, while CMP_MODE specifies the mode in
4524 which the comparison is done, so it is the mode of the operands.
4526 static rtx
4527 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4528 machine_mode cmp_mode, rtx op0, rtx op1)
4530 enum rtx_code op0code = GET_CODE (op0);
4532 if (op1 == const0_rtx && COMPARISON_P (op0))
4534 /* If op0 is a comparison, extract the comparison arguments
4535 from it. */
4536 if (code == NE)
4538 if (GET_MODE (op0) == mode)
4539 return simplify_rtx (op0);
4540 else
4541 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4542 XEXP (op0, 0), XEXP (op0, 1));
4544 else if (code == EQ)
4546 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4547 if (new_code != UNKNOWN)
4548 return simplify_gen_relational (new_code, mode, VOIDmode,
4549 XEXP (op0, 0), XEXP (op0, 1));
4553 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4554 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
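/* As a concrete SImode instance, (ltu (plus a 7) 7) is true exactly when
   the addition wrapped around, i.e. when a >= 0xfffffff9, so it becomes
   (geu a -7).  */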
4555 if ((code == LTU || code == GEU)
4556 && GET_CODE (op0) == PLUS
4557 && CONST_INT_P (XEXP (op0, 1))
4558 && (rtx_equal_p (op1, XEXP (op0, 0))
4559 || rtx_equal_p (op1, XEXP (op0, 1)))
4560 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4561 && XEXP (op0, 1) != const0_rtx)
4563 rtx new_cmp
4564 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4565 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4566 cmp_mode, XEXP (op0, 0), new_cmp);
4569 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4570 if ((code == LTU || code == GEU)
4571 && GET_CODE (op0) == PLUS
4572 && rtx_equal_p (op1, XEXP (op0, 1))
4573 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4574 && !rtx_equal_p (op1, XEXP (op0, 0)))
4575 return simplify_gen_relational (code, mode, cmp_mode, op0,
4576 copy_rtx (XEXP (op0, 0)));
4578 if (op1 == const0_rtx)
4580 /* Canonicalize (GTU x 0) as (NE x 0). */
4581 if (code == GTU)
4582 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4583 /* Canonicalize (LEU x 0) as (EQ x 0). */
4584 if (code == LEU)
4585 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4587 else if (op1 == const1_rtx)
4589 switch (code)
4591 case GE:
4592 /* Canonicalize (GE x 1) as (GT x 0). */
4593 return simplify_gen_relational (GT, mode, cmp_mode,
4594 op0, const0_rtx);
4595 case GEU:
4596 /* Canonicalize (GEU x 1) as (NE x 0). */
4597 return simplify_gen_relational (NE, mode, cmp_mode,
4598 op0, const0_rtx);
4599 case LT:
4600 /* Canonicalize (LT x 1) as (LE x 0). */
4601 return simplify_gen_relational (LE, mode, cmp_mode,
4602 op0, const0_rtx);
4603 case LTU:
4604 /* Canonicalize (LTU x 1) as (EQ x 0). */
4605 return simplify_gen_relational (EQ, mode, cmp_mode,
4606 op0, const0_rtx);
4607 default:
4608 break;
4611 else if (op1 == constm1_rtx)
4613 /* Canonicalize (LE x -1) as (LT x 0). */
4614 if (code == LE)
4615 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4616 /* Canonicalize (GT x -1) as (GE x 0). */
4617 if (code == GT)
4618 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4621 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4622 if ((code == EQ || code == NE)
4623 && (op0code == PLUS || op0code == MINUS)
4624 && CONSTANT_P (op1)
4625 && CONSTANT_P (XEXP (op0, 1))
4626 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4628 rtx x = XEXP (op0, 0);
4629 rtx c = XEXP (op0, 1);
4630 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4631 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4633 /* Detect an infinite recursive condition, where we oscillate at this
4634 simplification case between:
4635 A + B == C <---> C - B == A,
4636 where A, B, and C are all constants with non-simplifiable expressions,
4637 usually SYMBOL_REFs. */
4638 if (GET_CODE (tem) == invcode
4639 && CONSTANT_P (x)
4640 && rtx_equal_p (c, XEXP (tem, 1)))
4641 return NULL_RTX;
4643 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4646 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4647 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4648 if (code == NE
4649 && op1 == const0_rtx
4650 && GET_MODE_CLASS (mode) == MODE_INT
4651 && cmp_mode != VOIDmode
4652 /* ??? Work-around BImode bugs in the ia64 backend. */
4653 && mode != BImode
4654 && cmp_mode != BImode
4655 && nonzero_bits (op0, cmp_mode) == 1
4656 && STORE_FLAG_VALUE == 1)
4657 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4658 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4659 : lowpart_subreg (mode, op0, cmp_mode);
4661 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4662 if ((code == EQ || code == NE)
4663 && op1 == const0_rtx
4664 && op0code == XOR)
4665 return simplify_gen_relational (code, mode, cmp_mode,
4666 XEXP (op0, 0), XEXP (op0, 1));
4668 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4669 if ((code == EQ || code == NE)
4670 && op0code == XOR
4671 && rtx_equal_p (XEXP (op0, 0), op1)
4672 && !side_effects_p (XEXP (op0, 0)))
4673 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4674 CONST0_RTX (mode));
4676 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4677 if ((code == EQ || code == NE)
4678 && op0code == XOR
4679 && rtx_equal_p (XEXP (op0, 1), op1)
4680 && !side_effects_p (XEXP (op0, 1)))
4681 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4682 CONST0_RTX (mode));
4684 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4685 if ((code == EQ || code == NE)
4686 && op0code == XOR
4687 && CONST_SCALAR_INT_P (op1)
4688 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4689 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4690 simplify_gen_binary (XOR, cmp_mode,
4691 XEXP (op0, 1), op1));
4693 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4694 can be implemented with a BICS instruction on some targets, or
4695 constant-folded if y is a constant. */
4696 if ((code == EQ || code == NE)
4697 && op0code == AND
4698 && rtx_equal_p (XEXP (op0, 0), op1)
4699 && !side_effects_p (op1)
4700 && op1 != CONST0_RTX (cmp_mode))
4702 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4703 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4705 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4706 CONST0_RTX (cmp_mode));
4709 /* Likewise for (eq/ne (and x y) y). */
4710 if ((code == EQ || code == NE)
4711 && op0code == AND
4712 && rtx_equal_p (XEXP (op0, 1), op1)
4713 && !side_effects_p (op1)
4714 && op1 != CONST0_RTX (cmp_mode))
4716 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4717 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4719 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4720 CONST0_RTX (cmp_mode));
4723 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
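/* For instance, comparing a byte-swapped SImode value against
   (const_int 0x12345678) becomes a comparison of the original value
   against (const_int 0x78563412).  */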
4724 if ((code == EQ || code == NE)
4725 && GET_CODE (op0) == BSWAP
4726 && CONST_SCALAR_INT_P (op1))
4727 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4728 simplify_gen_unary (BSWAP, cmp_mode,
4729 op1, cmp_mode));
4731 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4732 if ((code == EQ || code == NE)
4733 && GET_CODE (op0) == BSWAP
4734 && GET_CODE (op1) == BSWAP)
4735 return simplify_gen_relational (code, mode, cmp_mode,
4736 XEXP (op0, 0), XEXP (op1, 0));
4738 if (op0code == POPCOUNT && op1 == const0_rtx)
4739 switch (code)
4741 case EQ:
4742 case LE:
4743 case LEU:
4744 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4745 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4746 XEXP (op0, 0), const0_rtx);
4748 case NE:
4749 case GT:
4750 case GTU:
4751 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4752 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4753 XEXP (op0, 0), const0_rtx);
4755 default:
4756 break;
4759 return NULL_RTX;
4762 enum
4764 CMP_EQ = 1,
4765 CMP_LT = 2,
4766 CMP_GT = 4,
4767 CMP_LTU = 8,
4768 CMP_GTU = 16
4772 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4773 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4774 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4775 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4776 For floating-point comparisons, assume that the operands were ordered. */
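/* For example, comparison_result (LE, CMP_GT) is const0_rtx, while
   comparison_result (NE, CMP_LT | CMP_LTU) is const_true_rtx.  */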
4778 static rtx
4779 comparison_result (enum rtx_code code, int known_results)
4781 switch (code)
4783 case EQ:
4784 case UNEQ:
4785 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4786 case NE:
4787 case LTGT:
4788 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4790 case LT:
4791 case UNLT:
4792 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4793 case GE:
4794 case UNGE:
4795 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4797 case GT:
4798 case UNGT:
4799 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4800 case LE:
4801 case UNLE:
4802 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4804 case LTU:
4805 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4806 case GEU:
4807 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4809 case GTU:
4810 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4811 case LEU:
4812 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4814 case ORDERED:
4815 return const_true_rtx;
4816 case UNORDERED:
4817 return const0_rtx;
4818 default:
4819 gcc_unreachable ();
4823 /* Check if the given comparison (done in the given MODE) is actually
4824 a tautology or a contradiction. If the mode is VOIDmode, the
4825 comparison is done in "infinite precision". If no simplification
4826 is possible, this function returns zero. Otherwise, it returns
4827 either const_true_rtx or const0_rtx. */
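/* A minimal usage sketch:

     rtx r = simplify_const_relational_operation (LTU, SImode,
                                                  const1_rtx, const0_rtx);

   here r is const0_rtx, since 1 < 0 is false for any unsigned comparison.  */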
4830 simplify_const_relational_operation (enum rtx_code code,
4831 machine_mode mode,
4832 rtx op0, rtx op1)
4834 rtx tem;
4835 rtx trueop0;
4836 rtx trueop1;
4838 gcc_assert (mode != VOIDmode
4839 || (GET_MODE (op0) == VOIDmode
4840 && GET_MODE (op1) == VOIDmode));
4842 /* If op0 is a compare, extract the comparison arguments from it. */
4843 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4845 op1 = XEXP (op0, 1);
4846 op0 = XEXP (op0, 0);
4848 if (GET_MODE (op0) != VOIDmode)
4849 mode = GET_MODE (op0);
4850 else if (GET_MODE (op1) != VOIDmode)
4851 mode = GET_MODE (op1);
4852 else
4853 return 0;
4856 /* We can't simplify MODE_CC values since we don't know what the
4857 actual comparison is. */
4858 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4859 return 0;
4861 /* Make sure the constant is second. */
4862 if (swap_commutative_operands_p (op0, op1))
4864 std::swap (op0, op1);
4865 code = swap_condition (code);
4868 trueop0 = avoid_constant_pool_reference (op0);
4869 trueop1 = avoid_constant_pool_reference (op1);
4871 /* For integer comparisons of A and B, we may be able to simplify A - B and
4872 then simplify a comparison of that with zero. If A and B are both either
4873 a register or a CONST_INT, this can't help; testing for these cases will
4874 prevent infinite recursion here and speed things up.
4876 We can only do this for EQ and NE comparisons as otherwise we may
4877 lose or introduce overflow which we cannot disregard as undefined, since
4878 we do not know the signedness of the operation on either the left or
4879 the right-hand side of the comparison. */
4881 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4882 && (code == EQ || code == NE)
4883 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4884 && (REG_P (op1) || CONST_INT_P (trueop1)))
4885 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4886 /* We cannot do this if tem is a nonzero address. */
4887 && ! nonzero_address_p (tem))
4888 return simplify_const_relational_operation (signed_condition (code),
4889 mode, tem, const0_rtx);
4891 if (! HONOR_NANS (mode) && code == ORDERED)
4892 return const_true_rtx;
4894 if (! HONOR_NANS (mode) && code == UNORDERED)
4895 return const0_rtx;
4897 /* For modes without NaNs, if the two operands are equal, we know the
4898 result except if they have side-effects. Even with NaNs we know
4899 the result of unordered comparisons and, if signaling NaNs are
4900 irrelevant, also the result of LT/GT/LTGT. */
4901 if ((! HONOR_NANS (trueop0)
4902 || code == UNEQ || code == UNLE || code == UNGE
4903 || ((code == LT || code == GT || code == LTGT)
4904 && ! HONOR_SNANS (trueop0)))
4905 && rtx_equal_p (trueop0, trueop1)
4906 && ! side_effects_p (trueop0))
4907 return comparison_result (code, CMP_EQ);
4909 /* If the operands are floating-point constants, see if we can fold
4910 the result. */
4911 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4912 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4913 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4915 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4916 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4918 /* Comparisons are unordered iff at least one of the values is NaN. */
4919 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4920 switch (code)
4922 case UNEQ:
4923 case UNLT:
4924 case UNGT:
4925 case UNLE:
4926 case UNGE:
4927 case NE:
4928 case UNORDERED:
4929 return const_true_rtx;
4930 case EQ:
4931 case LT:
4932 case GT:
4933 case LE:
4934 case GE:
4935 case LTGT:
4936 case ORDERED:
4937 return const0_rtx;
4938 default:
4939 return 0;
4942 return comparison_result (code,
4943 (real_equal (d0, d1) ? CMP_EQ :
4944 real_less (d0, d1) ? CMP_LT : CMP_GT));
4947 /* Otherwise, see if the operands are both integers. */
4948 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4949 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4951 /* It would be nice if we really had a mode here. However, the
4952 largest int representable on the target is as good as
4953 infinite. */
4954 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4955 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4956 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4958 if (wi::eq_p (ptrueop0, ptrueop1))
4959 return comparison_result (code, CMP_EQ);
4960 else
4962 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4963 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4964 return comparison_result (code, cr);
4968 /* Optimize comparisons with upper and lower bounds. */
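/* For instance, if nonzero_bits proves that trueop0 fits in [0, 15],
   then (gtu trueop0 (const_int 100)) can never hold and folds to
   const0_rtx below.  */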
4969 if (HWI_COMPUTABLE_MODE_P (mode)
4970 && CONST_INT_P (trueop1)
4971 && !side_effects_p (trueop0))
4973 int sign;
4974 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4975 HOST_WIDE_INT val = INTVAL (trueop1);
4976 HOST_WIDE_INT mmin, mmax;
4978 if (code == GEU
4979 || code == LEU
4980 || code == GTU
4981 || code == LTU)
4982 sign = 0;
4983 else
4984 sign = 1;
4986 /* Get a reduced range if the sign bit is zero. */
4987 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4989 mmin = 0;
4990 mmax = nonzero;
4992 else
4994 rtx mmin_rtx, mmax_rtx;
4995 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4997 mmin = INTVAL (mmin_rtx);
4998 mmax = INTVAL (mmax_rtx);
4999 if (sign)
5001 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5003 mmin >>= (sign_copies - 1);
5004 mmax >>= (sign_copies - 1);
5008 switch (code)
5010 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5011 case GEU:
5012 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5013 return const_true_rtx;
5014 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5015 return const0_rtx;
5016 break;
5017 case GE:
5018 if (val <= mmin)
5019 return const_true_rtx;
5020 if (val > mmax)
5021 return const0_rtx;
5022 break;
5024 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5025 case LEU:
5026 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5027 return const_true_rtx;
5028 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5029 return const0_rtx;
5030 break;
5031 case LE:
5032 if (val >= mmax)
5033 return const_true_rtx;
5034 if (val < mmin)
5035 return const0_rtx;
5036 break;
5038 case EQ:
5039 /* x == y is always false for y out of range. */
5040 if (val < mmin || val > mmax)
5041 return const0_rtx;
5042 break;
5044 /* x > y is always false for y >= mmax, always true for y < mmin. */
5045 case GTU:
5046 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5047 return const0_rtx;
5048 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5049 return const_true_rtx;
5050 break;
5051 case GT:
5052 if (val >= mmax)
5053 return const0_rtx;
5054 if (val < mmin)
5055 return const_true_rtx;
5056 break;
5058 /* x < y is always false for y <= mmin, always true for y > mmax. */
5059 case LTU:
5060 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5061 return const0_rtx;
5062 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5063 return const_true_rtx;
5064 break;
5065 case LT:
5066 if (val <= mmin)
5067 return const0_rtx;
5068 if (val > mmax)
5069 return const_true_rtx;
5070 break;
5072 case NE:
5073 /* x != y is always true for y out of range. */
5074 if (val < mmin || val > mmax)
5075 return const_true_rtx;
5076 break;
5078 default:
5079 break;
5083 /* Optimize integer comparisons with zero. */
5084 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5086 /* Some addresses are known to be nonzero. We don't know
5087 their sign, but equality comparisons are known. */
5088 if (nonzero_address_p (trueop0))
5090 if (code == EQ || code == LEU)
5091 return const0_rtx;
5092 if (code == NE || code == GTU)
5093 return const_true_rtx;
5096 /* See if the first operand is an IOR with a constant. If so, we
5097 may be able to determine the result of this comparison. */
5098 if (GET_CODE (op0) == IOR)
5100 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5101 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5103 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5104 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5105 && (UINTVAL (inner_const)
5106 & ((unsigned HOST_WIDE_INT) 1
5107 << sign_bitnum)));
5109 switch (code)
5111 case EQ:
5112 case LEU:
5113 return const0_rtx;
5114 case NE:
5115 case GTU:
5116 return const_true_rtx;
5117 case LT:
5118 case LE:
5119 if (has_sign)
5120 return const_true_rtx;
5121 break;
5122 case GT:
5123 case GE:
5124 if (has_sign)
5125 return const0_rtx;
5126 break;
5127 default:
5128 break;
5134 /* Optimize comparison of ABS with zero. */
5135 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5136 && (GET_CODE (trueop0) == ABS
5137 || (GET_CODE (trueop0) == FLOAT_EXTEND
5138 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5140 switch (code)
5142 case LT:
5143 /* Optimize abs(x) < 0.0. */
5144 if (!HONOR_SNANS (mode)
5145 && (!INTEGRAL_MODE_P (mode)
5146 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5148 if (INTEGRAL_MODE_P (mode)
5149 && (issue_strict_overflow_warning
5150 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5151 warning (OPT_Wstrict_overflow,
5152 ("assuming signed overflow does not occur when "
5153 "assuming abs (x) < 0 is false"));
5154 return const0_rtx;
5156 break;
5158 case GE:
5159 /* Optimize abs(x) >= 0.0. */
5160 if (!HONOR_NANS (mode)
5161 && (!INTEGRAL_MODE_P (mode)
5162 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5164 if (INTEGRAL_MODE_P (mode)
5165 && (issue_strict_overflow_warning
5166 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5167 warning (OPT_Wstrict_overflow,
5168 ("assuming signed overflow does not occur when "
5169 "assuming abs (x) >= 0 is true"));
5170 return const_true_rtx;
5172 break;
5174 case UNGE:
5175 /* Optimize ! (abs(x) < 0.0). */
5176 return const_true_rtx;
5178 default:
5179 break;
5183 return 0;
5186 /* Simplify CODE, an operation with result mode MODE and three operands,
5187 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5188 a constant. Return 0 if no simplification is possible. */
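/* E.g. an IF_THEN_ELSE whose condition is already a CONST_INT collapses
   immediately: (if_then_else (const_int 1) A B) simplifies to A, and
   (if_then_else (const_int 0) A B) to B.  */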
5191 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5192 machine_mode op0_mode, rtx op0, rtx op1,
5193 rtx op2)
5195 unsigned int width = GET_MODE_PRECISION (mode);
5196 bool any_change = false;
5197 rtx tem, trueop2;
5199 /* VOIDmode means "infinite" precision. */
5200 if (width == 0)
5201 width = HOST_BITS_PER_WIDE_INT;
5203 switch (code)
5205 case FMA:
5206 /* Simplify negations around the multiplication. */
5207 /* -a * -b + c => a * b + c. */
5208 if (GET_CODE (op0) == NEG)
5210 tem = simplify_unary_operation (NEG, mode, op1, mode);
5211 if (tem)
5212 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5214 else if (GET_CODE (op1) == NEG)
5216 tem = simplify_unary_operation (NEG, mode, op0, mode);
5217 if (tem)
5218 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5221 /* Canonicalize the two multiplication operands. */
5222 /* a * -b + c => -b * a + c. */
5223 if (swap_commutative_operands_p (op0, op1))
5224 std::swap (op0, op1), any_change = true;
5226 if (any_change)
5227 return gen_rtx_FMA (mode, op0, op1, op2);
5228 return NULL_RTX;
5230 case SIGN_EXTRACT:
5231 case ZERO_EXTRACT:
5232 if (CONST_INT_P (op0)
5233 && CONST_INT_P (op1)
5234 && CONST_INT_P (op2)
5235 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5236 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5238 /* Extracting a bit-field from a constant */
5239 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5240 HOST_WIDE_INT op1val = INTVAL (op1);
5241 HOST_WIDE_INT op2val = INTVAL (op2);
5242 if (BITS_BIG_ENDIAN)
5243 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5244 else
5245 val >>= op2val;
5247 if (HOST_BITS_PER_WIDE_INT != op1val)
5249 /* First zero-extend. */
5250 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5251 /* If desired, propagate sign bit. */
5252 if (code == SIGN_EXTRACT
5253 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5254 != 0)
5255 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5258 return gen_int_mode (val, mode);
5260 break;
5262 case IF_THEN_ELSE:
5263 if (CONST_INT_P (op0))
5264 return op0 != const0_rtx ? op1 : op2;
5266 /* Convert c ? a : a into "a". */
5267 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5268 return op1;
5270 /* Convert a != b ? a : b into "a". */
5271 if (GET_CODE (op0) == NE
5272 && ! side_effects_p (op0)
5273 && ! HONOR_NANS (mode)
5274 && ! HONOR_SIGNED_ZEROS (mode)
5275 && ((rtx_equal_p (XEXP (op0, 0), op1)
5276 && rtx_equal_p (XEXP (op0, 1), op2))
5277 || (rtx_equal_p (XEXP (op0, 0), op2)
5278 && rtx_equal_p (XEXP (op0, 1), op1))))
5279 return op1;
5281 /* Convert a == b ? a : b into "b". */
5282 if (GET_CODE (op0) == EQ
5283 && ! side_effects_p (op0)
5284 && ! HONOR_NANS (mode)
5285 && ! HONOR_SIGNED_ZEROS (mode)
5286 && ((rtx_equal_p (XEXP (op0, 0), op1)
5287 && rtx_equal_p (XEXP (op0, 1), op2))
5288 || (rtx_equal_p (XEXP (op0, 0), op2)
5289 && rtx_equal_p (XEXP (op0, 1), op1))))
5290 return op2;
5292 /* Convert (!c) != {0,...,0} ? a : b into
5293 c != {0,...,0} ? b : a for vector modes. */
5294 if (VECTOR_MODE_P (GET_MODE (op1))
5295 && GET_CODE (op0) == NE
5296 && GET_CODE (XEXP (op0, 0)) == NOT
5297 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5299 rtx cv = XEXP (op0, 1);
5300 int nunits = CONST_VECTOR_NUNITS (cv);
5301 bool ok = true;
5302 for (int i = 0; i < nunits; ++i)
5303 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5305 ok = false;
5306 break;
5308 if (ok)
5310 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5311 XEXP (XEXP (op0, 0), 0),
5312 XEXP (op0, 1));
5313 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5314 return retval;
5318 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5320 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5321 ? GET_MODE (XEXP (op0, 1))
5322 : GET_MODE (XEXP (op0, 0)));
5323 rtx temp;
5325 /* Look for happy constants in op1 and op2. */
5326 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5328 HOST_WIDE_INT t = INTVAL (op1);
5329 HOST_WIDE_INT f = INTVAL (op2);
5331 if (t == STORE_FLAG_VALUE && f == 0)
5332 code = GET_CODE (op0);
5333 else if (t == 0 && f == STORE_FLAG_VALUE)
5335 enum rtx_code tmp;
5336 tmp = reversed_comparison_code (op0, NULL_RTX);
5337 if (tmp == UNKNOWN)
5338 break;
5339 code = tmp;
5341 else
5342 break;
5344 return simplify_gen_relational (code, mode, cmp_mode,
5345 XEXP (op0, 0), XEXP (op0, 1));
5348 if (cmp_mode == VOIDmode)
5349 cmp_mode = op0_mode;
5350 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5351 cmp_mode, XEXP (op0, 0),
5352 XEXP (op0, 1));
5354 /* See if any simplifications were possible. */
5355 if (temp)
5357 if (CONST_INT_P (temp))
5358 return temp == const0_rtx ? op2 : op1;
5359 else if (temp)
5360 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5363 break;
5365 case VEC_MERGE:
5366 gcc_assert (GET_MODE (op0) == mode);
5367 gcc_assert (GET_MODE (op1) == mode);
5368 gcc_assert (VECTOR_MODE_P (mode));
5369 trueop2 = avoid_constant_pool_reference (op2);
5370 if (CONST_INT_P (trueop2))
5372 int elt_size = GET_MODE_UNIT_SIZE (mode);
5373 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5374 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5375 unsigned HOST_WIDE_INT mask;
5376 if (n_elts == HOST_BITS_PER_WIDE_INT)
5377 mask = -1;
5378 else
5379 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5381 if (!(sel & mask) && !side_effects_p (op0))
5382 return op1;
5383 if ((sel & mask) == mask && !side_effects_p (op1))
5384 return op0;
5386 rtx trueop0 = avoid_constant_pool_reference (op0);
5387 rtx trueop1 = avoid_constant_pool_reference (op1);
5388 if (GET_CODE (trueop0) == CONST_VECTOR
5389 && GET_CODE (trueop1) == CONST_VECTOR)
5391 rtvec v = rtvec_alloc (n_elts);
5392 unsigned int i;
5394 for (i = 0; i < n_elts; i++)
5395 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5396 ? CONST_VECTOR_ELT (trueop0, i)
5397 : CONST_VECTOR_ELT (trueop1, i));
5398 return gen_rtx_CONST_VECTOR (mode, v);
5401 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5402 if no element from a appears in the result. */
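/* Informally, with two lanes: in (vec_merge (vec_merge a b (const_int 2))
   c (const_int 1)) the outer mask only keeps lane 0 of the inner merge,
   which the inner mask takes from b, so a never survives and the whole
   expression becomes (vec_merge b c (const_int 1)).  */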
5403 if (GET_CODE (op0) == VEC_MERGE)
5405 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5406 if (CONST_INT_P (tem))
5408 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5409 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5410 return simplify_gen_ternary (code, mode, mode,
5411 XEXP (op0, 1), op1, op2);
5412 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5413 return simplify_gen_ternary (code, mode, mode,
5414 XEXP (op0, 0), op1, op2);
5417 if (GET_CODE (op1) == VEC_MERGE)
5419 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5420 if (CONST_INT_P (tem))
5422 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5423 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5424 return simplify_gen_ternary (code, mode, mode,
5425 op0, XEXP (op1, 1), op2);
5426 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5427 return simplify_gen_ternary (code, mode, mode,
5428 op0, XEXP (op1, 0), op2);
5432 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5433 with a. */
5434 if (GET_CODE (op0) == VEC_DUPLICATE
5435 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5436 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5437 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5439 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5440 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5442 if (XEXP (XEXP (op0, 0), 0) == op1
5443 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5444 return op1;
5449 if (rtx_equal_p (op0, op1)
5450 && !side_effects_p (op2) && !side_effects_p (op1))
5451 return op0;
5453 break;
5455 default:
5456 gcc_unreachable ();
5459 return 0;
5462 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5463 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5464 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5466 Works by unpacking OP into a collection of 8-bit values
5467 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5468 and then repacking them again for OUTERMODE. */
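/* As an informal little-endian example: a QImode subreg at byte 0 of the
   HImode constant 0x1234 unpacks the value into bytes { 0x34, 0x12 },
   selects byte 0, and repacks it as (const_int 0x34).  */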
5470 static rtx
5471 simplify_immed_subreg (machine_mode outermode, rtx op,
5472 machine_mode innermode, unsigned int byte)
5474 enum {
5475 value_bit = 8,
5476 value_mask = (1 << value_bit) - 1
5478 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5479 int value_start;
5480 int i;
5481 int elem;
5483 int num_elem;
5484 rtx * elems;
5485 int elem_bitsize;
5486 rtx result_s;
5487 rtvec result_v = NULL;
5488 enum mode_class outer_class;
5489 machine_mode outer_submode;
5490 int max_bitsize;
5492 /* Some ports misuse CCmode. */
5493 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5494 return op;
5496 /* We have no way to represent a complex constant at the rtl level. */
5497 if (COMPLEX_MODE_P (outermode))
5498 return NULL_RTX;
5500 /* We support any size mode. */
5501 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5502 GET_MODE_BITSIZE (innermode));
5504 /* Unpack the value. */
5506 if (GET_CODE (op) == CONST_VECTOR)
5508 num_elem = CONST_VECTOR_NUNITS (op);
5509 elems = &CONST_VECTOR_ELT (op, 0);
5510 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5512 else
5514 num_elem = 1;
5515 elems = &op;
5516 elem_bitsize = max_bitsize;
5518 /* If this asserts, it is too complicated; reducing value_bit may help. */
5519 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5520 /* I don't know how to handle endianness of sub-units. */
5521 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5523 for (elem = 0; elem < num_elem; elem++)
5525 unsigned char * vp;
5526 rtx el = elems[elem];
5528 /* Vectors are kept in target memory order. (This is probably
5529 a mistake.) */
5531 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5532 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5533 / BITS_PER_UNIT);
5534 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5535 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5536 unsigned bytele = (subword_byte % UNITS_PER_WORD
5537 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5538 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5541 switch (GET_CODE (el))
5543 case CONST_INT:
5544 for (i = 0;
5545 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5546 i += value_bit)
5547 *vp++ = INTVAL (el) >> i;
5548 /* CONST_INTs are always logically sign-extended. */
5549 for (; i < elem_bitsize; i += value_bit)
5550 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5551 break;
5553 case CONST_WIDE_INT:
5555 rtx_mode_t val = std::make_pair (el, innermode);
5556 unsigned char extend = wi::sign_mask (val);
5558 for (i = 0; i < elem_bitsize; i += value_bit)
5559 *vp++ = wi::extract_uhwi (val, i, value_bit);
5560 for (; i < elem_bitsize; i += value_bit)
5561 *vp++ = extend;
5563 break;
5565 case CONST_DOUBLE:
5566 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5568 unsigned char extend = 0;
5569 /* If this triggers, someone should have generated a
5570 CONST_INT instead. */
5571 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5573 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5574 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5575 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5577 *vp++
5578 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5579 i += value_bit;
5582 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5583 extend = -1;
5584 for (; i < elem_bitsize; i += value_bit)
5585 *vp++ = extend;
5587 else
5589 /* This is big enough for anything on the platform. */
5590 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5591 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5593 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5594 gcc_assert (bitsize <= elem_bitsize);
5595 gcc_assert (bitsize % value_bit == 0);
5597 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5598 GET_MODE (el));
5600 /* real_to_target produces its result in words affected by
5601 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5602 and use WORDS_BIG_ENDIAN instead; see the documentation
5603 of SUBREG in rtl.texi. */
5604 for (i = 0; i < bitsize; i += value_bit)
5606 int ibase;
5607 if (WORDS_BIG_ENDIAN)
5608 ibase = bitsize - 1 - i;
5609 else
5610 ibase = i;
5611 *vp++ = tmp[ibase / 32] >> i % 32;
5614 /* It shouldn't matter what's done here, so fill it with
5615 zero. */
5616 for (; i < elem_bitsize; i += value_bit)
5617 *vp++ = 0;
5619 break;
5621 case CONST_FIXED:
5622 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5624 for (i = 0; i < elem_bitsize; i += value_bit)
5625 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5627 else
5629 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5630 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5631 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5632 i += value_bit)
5633 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5634 >> (i - HOST_BITS_PER_WIDE_INT);
5635 for (; i < elem_bitsize; i += value_bit)
5636 *vp++ = 0;
5638 break;
5640 default:
5641 gcc_unreachable ();
5645 /* Now, pick the right byte to start with. */
5646 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5647 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5648 will already have offset 0. */
5649 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5651 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5652 - byte);
5653 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5654 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5655 byte = (subword_byte % UNITS_PER_WORD
5656 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5659 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5660 so if it's become negative it will instead be very large.) */
5661 gcc_assert (byte < GET_MODE_SIZE (innermode));
5663 /* Convert from bytes to chunks of size value_bit. */
5664 value_start = byte * (BITS_PER_UNIT / value_bit);
5666 /* Re-pack the value. */
5667 num_elem = GET_MODE_NUNITS (outermode);
5669 if (VECTOR_MODE_P (outermode))
5671 result_v = rtvec_alloc (num_elem);
5672 elems = &RTVEC_ELT (result_v, 0);
5674 else
5675 elems = &result_s;
5677 outer_submode = GET_MODE_INNER (outermode);
5678 outer_class = GET_MODE_CLASS (outer_submode);
5679 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5681 gcc_assert (elem_bitsize % value_bit == 0);
5682 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5684 for (elem = 0; elem < num_elem; elem++)
5686 unsigned char *vp;
5688 /* Vectors are stored in target memory order. (This is probably
5689 a mistake.) */
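/* For illustration, assuming a big-endian target with 4-byte words and a
   two-element V2SImode value: element 1 occupies memory bytes 4-7, which is
   the least-significant half of the value, so BYTELE below comes out as 0;
   on a little-endian target BYTELE simply equals BYTE.  */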
5691 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5692 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5693 / BITS_PER_UNIT);
5694 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5695 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5696 unsigned bytele = (subword_byte % UNITS_PER_WORD
5697 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5698 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5701 switch (outer_class)
5703 case MODE_INT:
5704 case MODE_PARTIAL_INT:
5706 int u;
5707 int base = 0;
5708 int units
5709 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5710 / HOST_BITS_PER_WIDE_INT;
5711 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5712 wide_int r;
5714 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5715 return NULL_RTX;
5716 for (u = 0; u < units; u++)
5718 unsigned HOST_WIDE_INT buf = 0;
5719 for (i = 0;
5720 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5721 i += value_bit)
5722 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5724 tmp[u] = buf;
5725 base += HOST_BITS_PER_WIDE_INT;
5727 r = wide_int::from_array (tmp, units,
5728 GET_MODE_PRECISION (outer_submode));
5729 #if TARGET_SUPPORTS_WIDE_INT == 0
5730 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5731 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5732 return NULL_RTX;
5733 #endif
5734 elems[elem] = immed_wide_int_const (r, outer_submode);
5736 break;
5738 case MODE_FLOAT:
5739 case MODE_DECIMAL_FLOAT:
5741 REAL_VALUE_TYPE r;
5742 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5744 /* real_from_target wants its input in words affected by
5745 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5746 and use WORDS_BIG_ENDIAN instead; see the documentation
5747 of SUBREG in rtl.texi. */
5748 for (i = 0; i < max_bitsize / 32; i++)
5749 tmp[i] = 0;
5750 for (i = 0; i < elem_bitsize; i += value_bit)
5752 int ibase;
5753 if (WORDS_BIG_ENDIAN)
5754 ibase = elem_bitsize - 1 - i;
5755 else
5756 ibase = i;
5757 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5760 real_from_target (&r, tmp, outer_submode);
5761 elems[elem] = const_double_from_real_value (r, outer_submode);
5763 break;
5765 case MODE_FRACT:
5766 case MODE_UFRACT:
5767 case MODE_ACCUM:
5768 case MODE_UACCUM:
5770 FIXED_VALUE_TYPE f;
5771 f.data.low = 0;
5772 f.data.high = 0;
5773 f.mode = outer_submode;
5775 for (i = 0;
5776 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5777 i += value_bit)
5778 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5779 for (; i < elem_bitsize; i += value_bit)
5780 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5781 << (i - HOST_BITS_PER_WIDE_INT));
5783 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5785 break;
5787 default:
5788 gcc_unreachable ();
5791 if (VECTOR_MODE_P (outermode))
5792 return gen_rtx_CONST_VECTOR (outermode, result_v);
5793 else
5794 return result_s;
5797 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5798 Return 0 if no simplifications are possible. */
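/* For example (illustrative; offsets shown assume a little-endian target):
   (subreg:QI (const_int 0x1234) 0) taken from SImode folds to
   (const_int 0x34), and a lowpart SUBREG of a hard register such as
   (subreg:SI (reg:DI 0) 0) can fold to (reg:SI 0) when the target allows
   SImode in that register.  The cases below handle constants, nested
   SUBREGs, hard registers, MEMs, CONCATs and ZERO_EXTENDs in turn.  */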
5799 rtx
5800 simplify_subreg (machine_mode outermode, rtx op,
5801 machine_mode innermode, unsigned int byte)
5803 /* Little bit of sanity checking. */
5804 gcc_assert (innermode != VOIDmode);
5805 gcc_assert (outermode != VOIDmode);
5806 gcc_assert (innermode != BLKmode);
5807 gcc_assert (outermode != BLKmode);
5809 gcc_assert (GET_MODE (op) == innermode
5810 || GET_MODE (op) == VOIDmode);
5812 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5813 return NULL_RTX;
5815 if (byte >= GET_MODE_SIZE (innermode))
5816 return NULL_RTX;
5818 if (outermode == innermode && !byte)
5819 return op;
5821 if (CONST_SCALAR_INT_P (op)
5822 || CONST_DOUBLE_AS_FLOAT_P (op)
5823 || GET_CODE (op) == CONST_FIXED
5824 || GET_CODE (op) == CONST_VECTOR)
5825 return simplify_immed_subreg (outermode, op, innermode, byte);
5827 /* Changing mode twice with SUBREG => just change it once,
5828 or not at all if changing back to the starting mode. */
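/* For instance, on a little-endian target
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) can become (subreg:QI (reg:SI x) 0),
   while (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to (reg:SI x)
   because the outer mode matches the innermost one.  (Illustrative.)  */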
5829 if (GET_CODE (op) == SUBREG)
5831 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5832 int final_offset = byte + SUBREG_BYTE (op);
5833 rtx newx;
5835 if (outermode == innermostmode
5836 && byte == 0 && SUBREG_BYTE (op) == 0)
5837 return SUBREG_REG (op);
5839 /* The SUBREG_BYTE represents the offset, as if the value were stored
5840 in memory. An irritating exception is the paradoxical subreg, where
5841 we define SUBREG_BYTE to be 0. On big-endian machines, this
5842 value should be negative. For a moment, undo this exception. */
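/* Worked example, assuming UNITS_PER_WORD == 4 and a big-endian target:
   for the paradoxical (subreg:DI (reg:SI x) 0), the wider DImode view
   begins 4 bytes before the SImode value in memory, so the offset undone
   here is -4 rather than the 0 stored in SUBREG_BYTE.  */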
5843 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5845 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5846 if (WORDS_BIG_ENDIAN)
5847 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5848 if (BYTES_BIG_ENDIAN)
5849 final_offset += difference % UNITS_PER_WORD;
5851 if (SUBREG_BYTE (op) == 0
5852 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5854 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5855 if (WORDS_BIG_ENDIAN)
5856 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5857 if (BYTES_BIG_ENDIAN)
5858 final_offset += difference % UNITS_PER_WORD;
5861 /* See whether the resulting subreg will be paradoxical. */
5862 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5864 /* In nonparadoxical subregs we can't handle negative offsets. */
5865 if (final_offset < 0)
5866 return NULL_RTX;
5867 /* Bail out in case the resulting subreg would be incorrect. */
5868 if (final_offset % GET_MODE_SIZE (outermode)
5869 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5870 return NULL_RTX;
5872 else
5874 int offset = 0;
5875 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5877 /* In a paradoxical subreg, see if we are still looking at the lower part.
5878 If so, our SUBREG_BYTE will be 0. */
5879 if (WORDS_BIG_ENDIAN)
5880 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5881 if (BYTES_BIG_ENDIAN)
5882 offset += difference % UNITS_PER_WORD;
5883 if (offset == final_offset)
5884 final_offset = 0;
5885 else
5886 return NULL_RTX;
5889 /* Recurse for further possible simplifications. */
5890 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5891 final_offset);
5892 if (newx)
5893 return newx;
5894 if (validate_subreg (outermode, innermostmode,
5895 SUBREG_REG (op), final_offset))
5897 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5898 if (SUBREG_PROMOTED_VAR_P (op)
5899 && SUBREG_PROMOTED_SIGN (op) >= 0
5900 && GET_MODE_CLASS (outermode) == MODE_INT
5901 && IN_RANGE (GET_MODE_SIZE (outermode),
5902 GET_MODE_SIZE (innermode),
5903 GET_MODE_SIZE (innermostmode))
5904 && subreg_lowpart_p (newx))
5906 SUBREG_PROMOTED_VAR_P (newx) = 1;
5907 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5909 return newx;
5911 return NULL_RTX;
5914 /* SUBREG of a hard register => just change the register number
5915 and/or mode. If the hard register is not valid in that mode,
5916 suppress this simplification. If the hard register is the stack,
5917 frame, or argument pointer, leave this as a SUBREG. */
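/* For example (illustrative; numbering is target-specific): with 4-byte
   hard registers on a little-endian target, (subreg:SI (reg:DI 2) 4) can
   simplify to (reg:SI 3), provided the target allows SImode there.  */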
5919 if (REG_P (op) && HARD_REGISTER_P (op))
5921 unsigned int regno, final_regno;
5923 regno = REGNO (op);
5924 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5925 if (HARD_REGISTER_NUM_P (final_regno))
5927 rtx x;
5928 int final_offset = byte;
5930 /* Adjust offset for paradoxical subregs. */
5931 if (byte == 0
5932 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5934 int difference = (GET_MODE_SIZE (innermode)
5935 - GET_MODE_SIZE (outermode));
5936 if (WORDS_BIG_ENDIAN)
5937 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5938 if (BYTES_BIG_ENDIAN)
5939 final_offset += difference % UNITS_PER_WORD;
5942 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5944 /* Propagate the original regno. We don't have any way to specify
5945 the offset inside the original regno, so do so only for the lowpart.
5946 The information is used only by alias analysis, which cannot
5947 grok a partial register anyway. */
5949 if (subreg_lowpart_offset (outermode, innermode) == byte)
5950 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5951 return x;
5955 /* If we have a SUBREG of a register that we are replacing and we are
5956 replacing it with a MEM, make a new MEM and try replacing the
5957 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5958 or if we would be widening it. */
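/* E.g. (subreg:QI (mem:SI ADDR) 3) can become a QImode MEM at ADDR plus 3,
   subject to the checks below (illustrative; the exact address rtx comes
   from adjust_address_nv).  */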
5960 if (MEM_P (op)
5961 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5962 /* Allow splitting of volatile memory references in case we don't
5963 have an instruction to move the whole thing. */
5964 && (! MEM_VOLATILE_P (op)
5965 || ! have_insn_for (SET, innermode))
5966 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5967 return adjust_address_nv (op, outermode, byte);
5969 /* Handle complex values represented as a CONCAT
5970 of the real and imaginary parts. */
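/* For instance, with 4-byte SFmode parts,
   (subreg:SF (concat:SC (reg:SF r) (reg:SF i)) 0) selects the real part
   (reg:SF r), while byte 4 selects the imaginary part (reg:SF i).
   (Illustrative.)  */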
5971 if (GET_CODE (op) == CONCAT)
5973 unsigned int part_size, final_offset;
5974 rtx part, res;
5976 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5977 if (byte < part_size)
5979 part = XEXP (op, 0);
5980 final_offset = byte;
5982 else
5984 part = XEXP (op, 1);
5985 final_offset = byte - part_size;
5988 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5989 return NULL_RTX;
5991 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5992 if (res)
5993 return res;
5994 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5995 return gen_rtx_SUBREG (outermode, part, final_offset);
5996 return NULL_RTX;
5999 /* A SUBREG resulting from a zero extension may fold to zero if
6000 it extracts higher bits than the ZERO_EXTEND's source provides. */
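/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI x)) 4)
   reads only bits that the zero extension guarantees to be zero, so it
   folds to (const_int 0).  */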
6001 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6003 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6004 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6005 return CONST0_RTX (outermode);
6008 if (SCALAR_INT_MODE_P (outermode)
6009 && SCALAR_INT_MODE_P (innermode)
6010 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6011 && byte == subreg_lowpart_offset (outermode, innermode))
6013 rtx tem = simplify_truncation (outermode, op, innermode);
6014 if (tem)
6015 return tem;
6018 return NULL_RTX;
6021 /* Make a SUBREG operation or equivalent if it folds. */
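/* This first tries simplify_subreg; if that fails, it falls back to
   generating a plain SUBREG rtx when validate_subreg allows it, and
   returns NULL_RTX otherwise (e.g. for a SUBREG of a SUBREG, of a CONCAT,
   or of a VOIDmode value).  */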
6023 rtx
6024 simplify_gen_subreg (machine_mode outermode, rtx op,
6025 machine_mode innermode, unsigned int byte)
6027 rtx newx;
6029 newx = simplify_subreg (outermode, op, innermode, byte);
6030 if (newx)
6031 return newx;
6033 if (GET_CODE (op) == SUBREG
6034 || GET_CODE (op) == CONCAT
6035 || GET_MODE (op) == VOIDmode)
6036 return NULL_RTX;
6038 if (validate_subreg (outermode, innermode, op, byte))
6039 return gen_rtx_SUBREG (outermode, op, byte);
6041 return NULL_RTX;
6044 /* Generate a SUBREG that extracts the least significant part of EXPR (in mode
6045 INNER_MODE) as a value of mode OUTER_MODE. */
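/* For example, lowpart_subreg (QImode, x, SImode) uses the byte offset
   returned by subreg_lowpart_offset: 0 on a little-endian target and
   typically 3 on a big-endian one (illustrative).  */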
6047 rtx
6048 lowpart_subreg (machine_mode outer_mode, rtx expr,
6049 machine_mode inner_mode)
6051 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6052 subreg_lowpart_offset (outer_mode, inner_mode));
6055 /* Simplify X, an rtx expression.
6057 Return the simplified expression or NULL if no simplifications
6058 were possible.
6060 This is the preferred entry point into the simplification routines;
6061 however, we still allow passes to call the more specific routines.
6063 Right now GCC has three (yes, three) major bodies of RTL simplification
6064 code that need to be unified.
6066 1. fold_rtx in cse.c. This code uses various CSE specific
6067 information to aid in RTL simplification.
6069 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6070 it uses combine specific information to aid in RTL
6071 simplification.
6073 3. The routines in this file.
6076 Long term we want to only have one body of simplification code; to
6077 get to that state I recommend the following steps:
6079 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6080 which are not pass dependent state into these routines.
6082 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6083 use this routine whenever possible.
6085 3. Allow for pass dependent state to be provided to these
6086 routines and add simplifications based on the pass dependent
6087 state. Remove code from cse.c & combine.c that becomes
6088 redundant/dead.
6090 It will take time, but ultimately the compiler will be easier to
6091 maintain and improve. It's totally silly that when we add a
6092 simplification it needs to be added to 4 places (3 for RTL
6093 simplification and 1 for tree simplification). */
6095 rtx
6096 simplify_rtx (const_rtx x)
6098 const enum rtx_code code = GET_CODE (x);
6099 const machine_mode mode = GET_MODE (x);
6101 switch (GET_RTX_CLASS (code))
6103 case RTX_UNARY:
6104 return simplify_unary_operation (code, mode,
6105 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6106 case RTX_COMM_ARITH:
6107 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6108 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6110 /* Fall through.... */
6112 case RTX_BIN_ARITH:
6113 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6115 case RTX_TERNARY:
6116 case RTX_BITFIELD_OPS:
6117 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6118 XEXP (x, 0), XEXP (x, 1),
6119 XEXP (x, 2));
6121 case RTX_COMPARE:
6122 case RTX_COMM_COMPARE:
6123 return simplify_relational_operation (code, mode,
6124 ((GET_MODE (XEXP (x, 0))
6125 != VOIDmode)
6126 ? GET_MODE (XEXP (x, 0))
6127 : GET_MODE (XEXP (x, 1))),
6128 XEXP (x, 0),
6129 XEXP (x, 1));
6131 case RTX_EXTRA:
6132 if (code == SUBREG)
6133 return simplify_subreg (mode, SUBREG_REG (x),
6134 GET_MODE (SUBREG_REG (x)),
6135 SUBREG_BYTE (x));
6136 break;
6138 case RTX_OBJ:
6139 if (code == LO_SUM)
6141 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6142 if (GET_CODE (XEXP (x, 0)) == HIGH
6143 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6144 return XEXP (x, 1);
6146 break;
6148 default:
6149 break;
6151 return NULL;