Update to patch that Aldy committed directly here.
[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
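/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND applied to a
   low word whose top bit is set (say (unsigned HOST_WIDE_INT) -5) yields
   the all-ones high word -1, while a low word such as 5 yields a high
   word of 0.  */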
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
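/* The truncation matters for the most negative value of a mode: in QImode,
   negating (const_int -128) computes 128 as an unsigned HOST_WIDE_INT, and
   gen_int_mode then wraps it back to (const_int -128), so the negation
   wraps instead of overflowing the host arithmetic.  */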
 78 /* Test whether expression X is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
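/* For example, on a target where SImode is 32 bits wide, mode_signbit_p
   returns true for the SImode constant 0x80000000 (only the most
   significant bit of the mode set) and false for 0x40000000 or for any
   value with additional low bits set.  */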
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 unsigned int width;
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 unsigned int width;
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 unsigned int width;
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
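/* As an illustrative sketch (the register below is a hypothetical
   placeholder, not something this file defines), a typical caller might
   write:

     rtx reg = gen_rtx_REG (SImode, 1);
     rtx x = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   Since (plus X 0) folds, X here is REG itself; when no fold applies, a
   fresh (plus ...) rtx is generated with the operands in canonical
   (complex-first, constant-second) order.  */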
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
228 REAL_VALUE_TYPE d;
230 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
231 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
233 return x;
235 default:
236 return x;
239 if (GET_MODE (x) == BLKmode)
240 return x;
242 addr = XEXP (x, 0);
244 /* Call target hook to avoid the effects of -fpic etc.... */
245 addr = targetm.delegitimize_address (addr);
247 /* Split the address into a base and integer offset. */
248 if (GET_CODE (addr) == CONST
249 && GET_CODE (XEXP (addr, 0)) == PLUS
250 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
252 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
253 addr = XEXP (XEXP (addr, 0), 0);
256 if (GET_CODE (addr) == LO_SUM)
257 addr = XEXP (addr, 1);
259 /* If this is a constant pool reference, we can turn it into its
260 constant and hope that simplifications happen. */
261 if (GET_CODE (addr) == SYMBOL_REF
262 && CONSTANT_POOL_ADDRESS_P (addr))
264 c = get_pool_constant (addr);
265 cmode = get_pool_mode (addr);
267 /* If we're accessing the constant in a different mode than it was
268 originally stored, attempt to fix that up via subreg simplifications.
269 If that fails we have no choice but to return the original memory. */
270 if ((offset != 0 || cmode != GET_MODE (x))
271 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
273 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
274 if (tem && CONSTANT_P (tem))
275 return tem;
277 else
278 return c;
281 return x;
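/* For example, a (mem:DF (symbol_ref ...)) whose address points into the
   constant pool at a DFmode entry for 1.0 is replaced by the CONST_DOUBLE
   for 1.0 itself; any MEM this routine cannot see through (or a non-MEM)
   is returned unchanged.  */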
284 /* Simplify a MEM based on its attributes. This is the default
285 delegitimize_address target hook, and it's recommended that every
286 overrider call it. */
289 delegitimize_mem_from_attrs (rtx x)
291 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
292 use their base addresses as equivalent. */
293 if (MEM_P (x)
294 && MEM_EXPR (x)
295 && MEM_OFFSET_KNOWN_P (x))
297 tree decl = MEM_EXPR (x);
298 machine_mode mode = GET_MODE (x);
299 HOST_WIDE_INT offset = 0;
301 switch (TREE_CODE (decl))
303 default:
304 decl = NULL;
305 break;
307 case VAR_DECL:
308 break;
310 case ARRAY_REF:
311 case ARRAY_RANGE_REF:
312 case COMPONENT_REF:
313 case BIT_FIELD_REF:
314 case REALPART_EXPR:
315 case IMAGPART_EXPR:
316 case VIEW_CONVERT_EXPR:
318 HOST_WIDE_INT bitsize, bitpos;
319 tree toffset;
320 int unsignedp, volatilep = 0;
322 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
323 &mode, &unsignedp, &volatilep, false);
324 if (bitsize != GET_MODE_BITSIZE (mode)
325 || (bitpos % BITS_PER_UNIT)
326 || (toffset && !tree_fits_shwi_p (toffset)))
327 decl = NULL;
328 else
330 offset += bitpos / BITS_PER_UNIT;
331 if (toffset)
332 offset += tree_to_shwi (toffset);
334 break;
338 if (decl
339 && mode == GET_MODE (x)
340 && TREE_CODE (decl) == VAR_DECL
341 && (TREE_STATIC (decl)
342 || DECL_THREAD_LOCAL_P (decl))
343 && DECL_RTL_SET_P (decl)
344 && MEM_P (DECL_RTL (decl)))
346 rtx newx;
348 offset += MEM_OFFSET (x);
350 newx = DECL_RTL (decl);
352 if (MEM_P (newx))
354 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
356 /* Avoid creating a new MEM needlessly if we already had
357 the same address. We do if there's no OFFSET and the
358 old address X is identical to NEWX, or if X is of the
359 form (plus NEWX OFFSET), or the NEWX is of the form
360 (plus Y (const_int Z)) and X is that with the offset
361 added: (plus Y (const_int Z+OFFSET)). */
362 if (!((offset == 0
363 || (GET_CODE (o) == PLUS
364 && GET_CODE (XEXP (o, 1)) == CONST_INT
365 && (offset == INTVAL (XEXP (o, 1))
366 || (GET_CODE (n) == PLUS
367 && GET_CODE (XEXP (n, 1)) == CONST_INT
368 && (INTVAL (XEXP (n, 1)) + offset
369 == INTVAL (XEXP (o, 1)))
370 && (n = XEXP (n, 0))))
371 && (o = XEXP (o, 0))))
372 && rtx_equal_p (o, n)))
373 x = adjust_address_nv (newx, mode, offset);
375 else if (GET_MODE (x) == GET_MODE (newx)
376 && offset == 0)
377 x = newx;
381 return x;
384 /* Make a unary operation by first seeing if it folds and otherwise making
385 the specified operation. */
388 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
389 machine_mode op_mode)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
395 return tem;
397 return gen_rtx_fmt_e (code, mode, op);
400 /* Likewise for ternary operations. */
403 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
404 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
406 rtx tem;
408 /* If this simplifies, use it. */
409 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
410 op0, op1, op2)))
411 return tem;
413 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
416 /* Likewise, for relational operations.
417 CMP_MODE specifies mode comparison is done in. */
420 simplify_gen_relational (enum rtx_code code, machine_mode mode,
421 machine_mode cmp_mode, rtx op0, rtx op1)
423 rtx tem;
425 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
426 op0, op1)))
427 return tem;
429 return gen_rtx_fmt_ee (code, mode, op0, op1);
432 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
433 and simplify the result. If FN is non-NULL, call this callback on each
 434 X; if it returns non-NULL, replace X with its return value and simplify the
435 result. */
438 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
439 rtx (*fn) (rtx, const_rtx, void *), void *data)
441 enum rtx_code code = GET_CODE (x);
442 machine_mode mode = GET_MODE (x);
443 machine_mode op_mode;
444 const char *fmt;
445 rtx op0, op1, op2, newx, op;
446 rtvec vec, newvec;
447 int i, j;
449 if (__builtin_expect (fn != NULL, 0))
451 newx = fn (x, old_rtx, data);
452 if (newx)
453 return newx;
455 else if (rtx_equal_p (x, old_rtx))
456 return copy_rtx ((rtx) data);
458 switch (GET_RTX_CLASS (code))
460 case RTX_UNARY:
461 op0 = XEXP (x, 0);
462 op_mode = GET_MODE (op0);
463 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
464 if (op0 == XEXP (x, 0))
465 return x;
466 return simplify_gen_unary (code, mode, op0, op_mode);
468 case RTX_BIN_ARITH:
469 case RTX_COMM_ARITH:
470 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
473 return x;
474 return simplify_gen_binary (code, mode, op0, op1);
476 case RTX_COMPARE:
477 case RTX_COMM_COMPARE:
478 op0 = XEXP (x, 0);
479 op1 = XEXP (x, 1);
480 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
483 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
484 return x;
485 return simplify_gen_relational (code, mode, op_mode, op0, op1);
487 case RTX_TERNARY:
488 case RTX_BITFIELD_OPS:
489 op0 = XEXP (x, 0);
490 op_mode = GET_MODE (op0);
491 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
494 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
495 return x;
496 if (op_mode == VOIDmode)
497 op_mode = GET_MODE (op0);
498 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
500 case RTX_EXTRA:
501 if (code == SUBREG)
503 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
504 if (op0 == SUBREG_REG (x))
505 return x;
506 op0 = simplify_gen_subreg (GET_MODE (x), op0,
507 GET_MODE (SUBREG_REG (x)),
508 SUBREG_BYTE (x));
509 return op0 ? op0 : x;
511 break;
513 case RTX_OBJ:
514 if (code == MEM)
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 if (op0 == XEXP (x, 0))
518 return x;
519 return replace_equiv_address_nv (x, op0);
521 else if (code == LO_SUM)
523 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
524 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
526 /* (lo_sum (high x) y) -> y where x and y have the same base. */
527 if (GET_CODE (op0) == HIGH)
529 rtx base0, base1, offset0, offset1;
530 split_const (XEXP (op0, 0), &base0, &offset0);
531 split_const (op1, &base1, &offset1);
532 if (rtx_equal_p (base0, base1))
533 return op1;
536 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
537 return x;
538 return gen_rtx_LO_SUM (mode, op0, op1);
540 break;
542 default:
543 break;
546 newx = x;
547 fmt = GET_RTX_FORMAT (code);
548 for (i = 0; fmt[i]; i++)
549 switch (fmt[i])
551 case 'E':
552 vec = XVEC (x, i);
553 newvec = XVEC (newx, i);
554 for (j = 0; j < GET_NUM_ELEM (vec); j++)
556 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
557 old_rtx, fn, data);
558 if (op != RTVEC_ELT (vec, j))
560 if (newvec == vec)
562 newvec = shallow_copy_rtvec (vec);
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XVEC (newx, i) = newvec;
567 RTVEC_ELT (newvec, j) = op;
570 break;
572 case 'e':
573 if (XEXP (x, i))
575 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
576 if (op != XEXP (x, i))
578 if (x == newx)
579 newx = shallow_copy_rtx (x);
580 XEXP (newx, i) = op;
583 break;
585 return newx;
588 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
589 resulting RTX. Return a new RTX which is as simplified as possible. */
592 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
594 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
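/* As an illustrative sketch (X and REG1 below are hypothetical
   placeholders): with X = (plus:SI (reg:SI 1) (const_int 4)),

     rtx y = simplify_replace_rtx (x, reg1, GEN_INT (6));

   yields (const_int 10), because each substituted subexpression is
   re-simplified on the way back up via simplify_gen_binary.  */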
597 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
598 Only handle cases where the truncated value is inherently an rvalue.
600 RTL provides two ways of truncating a value:
602 1. a lowpart subreg. This form is only a truncation when both
603 the outer and inner modes (here MODE and OP_MODE respectively)
604 are scalar integers, and only then when the subreg is used as
605 an rvalue.
607 It is only valid to form such truncating subregs if the
608 truncation requires no action by the target. The onus for
609 proving this is on the creator of the subreg -- e.g. the
610 caller to simplify_subreg or simplify_gen_subreg -- and typically
611 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
613 2. a TRUNCATE. This form handles both scalar and compound integers.
615 The first form is preferred where valid. However, the TRUNCATE
616 handling in simplify_unary_operation turns the second form into the
617 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
618 so it is generally safe to form rvalue truncations using:
620 simplify_gen_unary (TRUNCATE, ...)
622 and leave simplify_unary_operation to work out which representation
623 should be used.
625 Because of the proof requirements on (1), simplify_truncation must
626 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
627 regardless of whether the outer truncation came from a SUBREG or a
628 TRUNCATE. For example, if the caller has proven that an SImode
629 truncation of:
631 (and:DI X Y)
633 is a no-op and can be represented as a subreg, it does not follow
634 that SImode truncations of X and Y are also no-ops. On a target
635 like 64-bit MIPS that requires SImode values to be stored in
636 sign-extended form, an SImode truncation of:
638 (and:DI (reg:DI X) (const_int 63))
640 is trivially a no-op because only the lower 6 bits can be set.
641 However, X is still an arbitrary 64-bit number and so we cannot
642 assume that truncating it too is a no-op. */
644 static rtx
645 simplify_truncation (machine_mode mode, rtx op,
646 machine_mode op_mode)
648 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
649 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
650 gcc_assert (precision <= op_precision);
652 /* Optimize truncations of zero and sign extended values. */
653 if (GET_CODE (op) == ZERO_EXTEND
654 || GET_CODE (op) == SIGN_EXTEND)
656 /* There are three possibilities. If MODE is the same as the
657 origmode, we can omit both the extension and the subreg.
658 If MODE is not larger than the origmode, we can apply the
659 truncation without the extension. Finally, if the outermode
660 is larger than the origmode, we can just extend to the appropriate
661 mode. */
662 machine_mode origmode = GET_MODE (XEXP (op, 0));
663 if (mode == origmode)
664 return XEXP (op, 0);
665 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
666 return simplify_gen_unary (TRUNCATE, mode,
667 XEXP (op, 0), origmode);
668 else
669 return simplify_gen_unary (GET_CODE (op), mode,
670 XEXP (op, 0), origmode);
673 /* If the machine can perform operations in the truncated mode, distribute
674 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
675 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
676 if (1
677 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
678 && (GET_CODE (op) == PLUS
679 || GET_CODE (op) == MINUS
680 || GET_CODE (op) == MULT))
682 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
683 if (op0)
685 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
686 if (op1)
687 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
 691 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
 692 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 /* Ensure that OP_MODE is at least twice as wide as MODE
697 to avoid the possibility that an outer LSHIFTRT shifts by more
698 than the sign extension's sign_bit_copies and introduces zeros
699 into the high bits of the result. */
700 && 2 * precision <= op_precision
701 && CONST_INT_P (XEXP (op, 1))
702 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
704 && UINTVAL (XEXP (op, 1)) < precision)
705 return simplify_gen_binary (ASHIFTRT, mode,
706 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
 708 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
 709 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
710 the outer subreg is effectively a truncation to the original mode. */
711 if ((GET_CODE (op) == LSHIFTRT
712 || GET_CODE (op) == ASHIFTRT)
713 && CONST_INT_P (XEXP (op, 1))
714 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
716 && UINTVAL (XEXP (op, 1)) < precision)
717 return simplify_gen_binary (LSHIFTRT, mode,
718 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
 720 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
 721 into (ashift:QI (x:QI) C), where C is a suitable small constant and
722 the outer subreg is effectively a truncation to the original mode. */
723 if (GET_CODE (op) == ASHIFT
724 && CONST_INT_P (XEXP (op, 1))
725 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
726 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
727 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
728 && UINTVAL (XEXP (op, 1)) < precision)
729 return simplify_gen_binary (ASHIFT, mode,
730 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
732 /* Recognize a word extraction from a multi-word subreg. */
733 if ((GET_CODE (op) == LSHIFTRT
734 || GET_CODE (op) == ASHIFTRT)
735 && SCALAR_INT_MODE_P (mode)
736 && SCALAR_INT_MODE_P (op_mode)
737 && precision >= BITS_PER_WORD
738 && 2 * precision <= op_precision
739 && CONST_INT_P (XEXP (op, 1))
740 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
741 && UINTVAL (XEXP (op, 1)) < op_precision)
743 int byte = subreg_lowpart_offset (mode, op_mode);
744 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
745 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
746 (WORDS_BIG_ENDIAN
747 ? byte - shifted_bytes
748 : byte + shifted_bytes));
751 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
752 and try replacing the TRUNCATE and shift with it. Don't do this
753 if the MEM has a mode-dependent address. */
754 if ((GET_CODE (op) == LSHIFTRT
755 || GET_CODE (op) == ASHIFTRT)
756 && SCALAR_INT_MODE_P (op_mode)
757 && MEM_P (XEXP (op, 0))
758 && CONST_INT_P (XEXP (op, 1))
759 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
760 && INTVAL (XEXP (op, 1)) > 0
761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
762 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
763 MEM_ADDR_SPACE (XEXP (op, 0)))
764 && ! MEM_VOLATILE_P (XEXP (op, 0))
765 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
766 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
768 int byte = subreg_lowpart_offset (mode, op_mode);
769 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
770 return adjust_address_nv (XEXP (op, 0), mode,
771 (WORDS_BIG_ENDIAN
772 ? byte - shifted_bytes
773 : byte + shifted_bytes));
776 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
777 (OP:SI foo:SI) if OP is NEG or ABS. */
778 if ((GET_CODE (op) == ABS
779 || GET_CODE (op) == NEG)
780 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
781 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
782 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
783 return simplify_gen_unary (GET_CODE (op), mode,
784 XEXP (XEXP (op, 0), 0), mode);
786 /* (truncate:A (subreg:B (truncate:C X) 0)) is
787 (truncate:A X). */
788 if (GET_CODE (op) == SUBREG
789 && SCALAR_INT_MODE_P (mode)
790 && SCALAR_INT_MODE_P (op_mode)
791 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
795 rtx inner = XEXP (SUBREG_REG (op), 0);
796 if (GET_MODE_PRECISION (mode)
797 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
798 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
799 else
800 /* If subreg above is paradoxical and C is narrower
801 than A, return (subreg:A (truncate:C X) 0). */
802 return simplify_gen_subreg (mode, SUBREG_REG (op),
803 GET_MODE (SUBREG_REG (op)), 0);
806 /* (truncate:A (truncate:B X)) is (truncate:A X). */
807 if (GET_CODE (op) == TRUNCATE)
808 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
809 GET_MODE (XEXP (op, 0)));
811 return NULL_RTX;
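/* Two of the folds above in action: (truncate:QI (zero_extend:SI (x:QI)))
   reduces to x:QI via the extension case at the top, and
   (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 3)))
   becomes (lshiftrt:QI (x:QI) (const_int 3)), since the shift count is
   smaller than the QImode precision.  */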
814 /* Try to simplify a unary operation CODE whose output mode is to be
815 MODE with input operand OP whose mode was originally OP_MODE.
816 Return zero if no simplification can be made. */
818 simplify_unary_operation (enum rtx_code code, machine_mode mode,
819 rtx op, machine_mode op_mode)
821 rtx trueop, tem;
823 trueop = avoid_constant_pool_reference (op);
825 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
826 if (tem)
827 return tem;
829 return simplify_unary_operation_1 (code, mode, op);
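/* As an illustrative sketch (OP here is a hypothetical operand), callers
   usually try the simplifier first and only build the rtx when it fails:

     rtx t = simplify_unary_operation (NEG, SImode, op, SImode);
     if (!t)
       t = gen_rtx_NEG (SImode, op);

   which is essentially what the simplify_gen_unary wrapper above does.  */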
832 /* Perform some simplifications we can do even if the operands
833 aren't constant. */
834 static rtx
835 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
837 enum rtx_code reversed;
838 rtx temp;
840 switch (code)
842 case NOT:
843 /* (not (not X)) == X. */
844 if (GET_CODE (op) == NOT)
845 return XEXP (op, 0);
847 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
848 comparison is all ones. */
849 if (COMPARISON_P (op)
850 && (mode == BImode || STORE_FLAG_VALUE == -1)
851 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
852 return simplify_gen_relational (reversed, mode, VOIDmode,
853 XEXP (op, 0), XEXP (op, 1));
855 /* (not (plus X -1)) can become (neg X). */
856 if (GET_CODE (op) == PLUS
857 && XEXP (op, 1) == constm1_rtx)
858 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
860 /* Similarly, (not (neg X)) is (plus X -1). */
861 if (GET_CODE (op) == NEG)
862 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
863 CONSTM1_RTX (mode));
865 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
866 if (GET_CODE (op) == XOR
867 && CONST_INT_P (XEXP (op, 1))
868 && (temp = simplify_unary_operation (NOT, mode,
869 XEXP (op, 1), mode)) != 0)
870 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
872 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
873 if (GET_CODE (op) == PLUS
874 && CONST_INT_P (XEXP (op, 1))
875 && mode_signbit_p (mode, XEXP (op, 1))
876 && (temp = simplify_unary_operation (NOT, mode,
877 XEXP (op, 1), mode)) != 0)
878 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
881 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
882 operands other than 1, but that is not valid. We could do a
883 similar simplification for (not (lshiftrt C X)) where C is
884 just the sign bit, but this doesn't seem common enough to
885 bother with. */
886 if (GET_CODE (op) == ASHIFT
887 && XEXP (op, 0) == const1_rtx)
889 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
890 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
893 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
894 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
895 so we can perform the above simplification. */
896 if (STORE_FLAG_VALUE == -1
897 && GET_CODE (op) == ASHIFTRT
898 && CONST_INT_P (XEXP (op, 1))
899 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
900 return simplify_gen_relational (GE, mode, VOIDmode,
901 XEXP (op, 0), const0_rtx);
904 if (GET_CODE (op) == SUBREG
905 && subreg_lowpart_p (op)
906 && (GET_MODE_SIZE (GET_MODE (op))
907 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
908 && GET_CODE (SUBREG_REG (op)) == ASHIFT
909 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
911 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
912 rtx x;
914 x = gen_rtx_ROTATE (inner_mode,
915 simplify_gen_unary (NOT, inner_mode, const1_rtx,
916 inner_mode),
917 XEXP (SUBREG_REG (op), 1));
918 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
919 if (temp)
920 return temp;
923 /* Apply De Morgan's laws to reduce number of patterns for machines
924 with negating logical insns (and-not, nand, etc.). If result has
925 only one NOT, put it first, since that is how the patterns are
926 coded. */
927 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
929 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
930 machine_mode op_mode;
932 op_mode = GET_MODE (in1);
933 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
935 op_mode = GET_MODE (in2);
936 if (op_mode == VOIDmode)
937 op_mode = mode;
938 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
940 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
941 std::swap (in1, in2);
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
953 break;
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op) == PLUS
962 && XEXP (op, 1) == const1_rtx)
963 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op) == NOT)
967 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
968 CONST1_RTX (mode));
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
980 if (GET_CODE (op) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (CONST_SCALAR_INT_P (XEXP (op, 1))
986 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
988 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
989 if (temp)
990 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
993 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
994 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
995 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
998 /* (neg (mult A B)) becomes (mult A (neg B)).
999 This works even for floating-point values. */
1000 if (GET_CODE (op) == MULT
1001 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1003 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1004 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1007 /* NEG commutes with ASHIFT since it is multiplication. Only do
1008 this if we can then eliminate the NEG (e.g., if the operand
1009 is a constant). */
1010 if (GET_CODE (op) == ASHIFT)
1012 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && CONST_INT_P (XEXP (op, 1))
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1033 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1034 if (GET_CODE (op) == XOR
1035 && XEXP (op, 1) == const1_rtx
1036 && nonzero_bits (XEXP (op, 0), mode) == 1)
1037 return plus_constant (mode, XEXP (op, 0), -1);
1039 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1040 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1041 if (GET_CODE (op) == LT
1042 && XEXP (op, 1) == const0_rtx
1043 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1045 machine_mode inner = GET_MODE (XEXP (op, 0));
1046 int isize = GET_MODE_PRECISION (inner);
1047 if (STORE_FLAG_VALUE == 1)
1049 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1050 GEN_INT (isize - 1));
1051 if (mode == inner)
1052 return temp;
1053 if (GET_MODE_PRECISION (mode) > isize)
1054 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1055 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1057 else if (STORE_FLAG_VALUE == -1)
1059 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1060 GEN_INT (isize - 1));
1061 if (mode == inner)
1062 return temp;
1063 if (GET_MODE_PRECISION (mode) > isize)
1064 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1065 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1068 break;
1070 case TRUNCATE:
1071 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1072 with the umulXi3_highpart patterns. */
1073 if (GET_CODE (op) == LSHIFTRT
1074 && GET_CODE (XEXP (op, 0)) == MULT)
1075 break;
1077 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1079 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1081 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1082 if (temp)
1083 return temp;
1085 /* We can't handle truncation to a partial integer mode here
1086 because we don't know the real bitsize of the partial
1087 integer mode. */
1088 break;
1091 if (GET_MODE (op) != VOIDmode)
1093 temp = simplify_truncation (mode, op, GET_MODE (op));
1094 if (temp)
1095 return temp;
1098 /* If we know that the value is already truncated, we can
1099 replace the TRUNCATE with a SUBREG. */
1100 if (GET_MODE_NUNITS (mode) == 1
1101 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1102 || truncated_to_mode (mode, op)))
1104 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 if (temp)
1106 return temp;
1109 /* A truncate of a comparison can be replaced with a subreg if
1110 STORE_FLAG_VALUE permits. This is like the previous test,
1111 but it works even if the comparison is done in a mode larger
1112 than HOST_BITS_PER_WIDE_INT. */
1113 if (HWI_COMPUTABLE_MODE_P (mode)
1114 && COMPARISON_P (op)
1115 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1117 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1118 if (temp)
1119 return temp;
1122 /* A truncate of a memory is just loading the low part of the memory
1123 if we are not changing the meaning of the address. */
1124 if (GET_CODE (op) == MEM
1125 && !VECTOR_MODE_P (mode)
1126 && !MEM_VOLATILE_P (op)
1127 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1134 break;
1136 case FLOAT_TRUNCATE:
1137 if (DECIMAL_FLOAT_MODE_P (mode))
1138 break;
1140 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1141 if (GET_CODE (op) == FLOAT_EXTEND
1142 && GET_MODE (XEXP (op, 0)) == mode)
1143 return XEXP (op, 0);
1145 /* (float_truncate:SF (float_truncate:DF foo:XF))
1146 = (float_truncate:SF foo:XF).
1147 This may eliminate double rounding, so it is unsafe.
1149 (float_truncate:SF (float_extend:XF foo:DF))
1150 = (float_truncate:SF foo:DF).
1152 (float_truncate:DF (float_extend:XF foo:SF))
1153 = (float_extend:DF foo:SF). */
1154 if ((GET_CODE (op) == FLOAT_TRUNCATE
1155 && flag_unsafe_math_optimizations)
1156 || GET_CODE (op) == FLOAT_EXTEND)
1157 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1158 0)))
1159 > GET_MODE_SIZE (mode)
1160 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1161 mode,
1162 XEXP (op, 0), mode);
1164 /* (float_truncate (float x)) is (float x) */
1165 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1166 && (flag_unsafe_math_optimizations
1167 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1168 && ((unsigned)significand_size (GET_MODE (op))
1169 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1170 - num_sign_bit_copies (XEXP (op, 0),
1171 GET_MODE (XEXP (op, 0))))))))
1172 return simplify_gen_unary (GET_CODE (op), mode,
1173 XEXP (op, 0),
1174 GET_MODE (XEXP (op, 0)));
1176 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1177 (OP:SF foo:SF) if OP is NEG or ABS. */
1178 if ((GET_CODE (op) == ABS
1179 || GET_CODE (op) == NEG)
1180 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1181 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1182 return simplify_gen_unary (GET_CODE (op), mode,
1183 XEXP (XEXP (op, 0), 0), mode);
1185 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1186 is (float_truncate:SF x). */
1187 if (GET_CODE (op) == SUBREG
1188 && subreg_lowpart_p (op)
1189 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1190 return SUBREG_REG (op);
1191 break;
1193 case FLOAT_EXTEND:
1194 if (DECIMAL_FLOAT_MODE_P (mode))
1195 break;
1197 /* (float_extend (float_extend x)) is (float_extend x)
1199 (float_extend (float x)) is (float x) assuming that double
 1200 rounding can't happen. */
1202 if (GET_CODE (op) == FLOAT_EXTEND
1203 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1204 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1205 && ((unsigned)significand_size (GET_MODE (op))
1206 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1207 - num_sign_bit_copies (XEXP (op, 0),
1208 GET_MODE (XEXP (op, 0)))))))
1209 return simplify_gen_unary (GET_CODE (op), mode,
1210 XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1213 break;
1215 case ABS:
1216 /* (abs (neg <foo>)) -> (abs <foo>) */
1217 if (GET_CODE (op) == NEG)
1218 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1221 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1222 do nothing. */
1223 if (GET_MODE (op) == VOIDmode)
1224 break;
1226 /* If operand is something known to be positive, ignore the ABS. */
1227 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1228 || val_signbit_known_clear_p (GET_MODE (op),
1229 nonzero_bits (op, GET_MODE (op))))
1230 return op;
1232 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1233 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1234 return gen_rtx_NEG (mode, op);
1236 break;
1238 case FFS:
1239 /* (ffs (*_extend <X>)) = (ffs <X>) */
1240 if (GET_CODE (op) == SIGN_EXTEND
1241 || GET_CODE (op) == ZERO_EXTEND)
1242 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1244 break;
1246 case POPCOUNT:
1247 switch (GET_CODE (op))
1249 case BSWAP:
1250 case ZERO_EXTEND:
1251 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1252 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1255 case ROTATE:
1256 case ROTATERT:
1257 /* Rotations don't affect popcount. */
1258 if (!side_effects_p (XEXP (op, 1)))
1259 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1260 GET_MODE (XEXP (op, 0)));
1261 break;
1263 default:
1264 break;
1266 break;
1268 case PARITY:
1269 switch (GET_CODE (op))
1271 case NOT:
1272 case BSWAP:
1273 case ZERO_EXTEND:
1274 case SIGN_EXTEND:
1275 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 case ROTATE:
1279 case ROTATERT:
1280 /* Rotations don't affect parity. */
1281 if (!side_effects_p (XEXP (op, 1)))
1282 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1286 default:
1287 break;
1289 break;
1291 case BSWAP:
1292 /* (bswap (bswap x)) -> x. */
1293 if (GET_CODE (op) == BSWAP)
1294 return XEXP (op, 0);
1295 break;
1297 case FLOAT:
1298 /* (float (sign_extend <X>)) = (float <X>). */
1299 if (GET_CODE (op) == SIGN_EXTEND)
1300 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1301 GET_MODE (XEXP (op, 0)));
1302 break;
1304 case SIGN_EXTEND:
1305 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1306 becomes just the MINUS if its mode is MODE. This allows
1307 folding switch statements on machines using casesi (such as
1308 the VAX). */
1309 if (GET_CODE (op) == TRUNCATE
1310 && GET_MODE (XEXP (op, 0)) == mode
1311 && GET_CODE (XEXP (op, 0)) == MINUS
1312 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1313 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1314 return XEXP (op, 0);
1316 /* Extending a widening multiplication should be canonicalized to
1317 a wider widening multiplication. */
1318 if (GET_CODE (op) == MULT)
1320 rtx lhs = XEXP (op, 0);
1321 rtx rhs = XEXP (op, 1);
1322 enum rtx_code lcode = GET_CODE (lhs);
1323 enum rtx_code rcode = GET_CODE (rhs);
1325 /* Widening multiplies usually extend both operands, but sometimes
1326 they use a shift to extract a portion of a register. */
1327 if ((lcode == SIGN_EXTEND
1328 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1329 && (rcode == SIGN_EXTEND
1330 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1332 machine_mode lmode = GET_MODE (lhs);
1333 machine_mode rmode = GET_MODE (rhs);
1334 int bits;
1336 if (lcode == ASHIFTRT)
1337 /* Number of bits not shifted off the end. */
1338 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1339 else /* lcode == SIGN_EXTEND */
1340 /* Size of inner mode. */
1341 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1343 if (rcode == ASHIFTRT)
1344 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1345 else /* rcode == SIGN_EXTEND */
1346 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1348 /* We can only widen multiplies if the result is mathematically
 1349 equivalent, i.e. if overflow was impossible. */
1350 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1351 return simplify_gen_binary
1352 (MULT, mode,
1353 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1354 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1358 /* Check for a sign extension of a subreg of a promoted
1359 variable, where the promotion is sign-extended, and the
1360 target mode is the same as the variable's promotion. */
1361 if (GET_CODE (op) == SUBREG
1362 && SUBREG_PROMOTED_VAR_P (op)
1363 && SUBREG_PROMOTED_SIGNED_P (op)
1364 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1366 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1367 if (temp)
1368 return temp;
1371 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1372 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1373 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1375 gcc_assert (GET_MODE_PRECISION (mode)
1376 > GET_MODE_PRECISION (GET_MODE (op)));
1377 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1381 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1382 is (sign_extend:M (subreg:O <X>)) if there is mode with
1383 GET_MODE_BITSIZE (N) - I bits.
1384 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1385 is similarly (zero_extend:M (subreg:O <X>)). */
1386 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1387 && GET_CODE (XEXP (op, 0)) == ASHIFT
1388 && CONST_INT_P (XEXP (op, 1))
1389 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1390 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1392 machine_mode tmode
1393 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1394 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1395 gcc_assert (GET_MODE_BITSIZE (mode)
1396 > GET_MODE_BITSIZE (GET_MODE (op)));
1397 if (tmode != BLKmode)
1399 rtx inner =
1400 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1401 if (inner)
1402 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1403 ? SIGN_EXTEND : ZERO_EXTEND,
1404 mode, inner, tmode);
1408 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1409 /* As we do not know which address space the pointer is referring to,
1410 we can do this only if the target does not support different pointer
1411 or address modes depending on the address space. */
1412 if (target_default_pointer_address_modes_p ()
1413 && ! POINTERS_EXTEND_UNSIGNED
1414 && mode == Pmode && GET_MODE (op) == ptr_mode
1415 && (CONSTANT_P (op)
1416 || (GET_CODE (op) == SUBREG
1417 && REG_P (SUBREG_REG (op))
1418 && REG_POINTER (SUBREG_REG (op))
1419 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1420 return convert_memory_address (Pmode, op);
1421 #endif
1422 break;
1424 case ZERO_EXTEND:
1425 /* Check for a zero extension of a subreg of a promoted
1426 variable, where the promotion is zero-extended, and the
1427 target mode is the same as the variable's promotion. */
1428 if (GET_CODE (op) == SUBREG
1429 && SUBREG_PROMOTED_VAR_P (op)
1430 && SUBREG_PROMOTED_UNSIGNED_P (op)
1431 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1433 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1434 if (temp)
1435 return temp;
1438 /* Extending a widening multiplication should be canonicalized to
1439 a wider widening multiplication. */
1440 if (GET_CODE (op) == MULT)
1442 rtx lhs = XEXP (op, 0);
1443 rtx rhs = XEXP (op, 1);
1444 enum rtx_code lcode = GET_CODE (lhs);
1445 enum rtx_code rcode = GET_CODE (rhs);
1447 /* Widening multiplies usually extend both operands, but sometimes
1448 they use a shift to extract a portion of a register. */
1449 if ((lcode == ZERO_EXTEND
1450 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1451 && (rcode == ZERO_EXTEND
1452 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1454 machine_mode lmode = GET_MODE (lhs);
1455 machine_mode rmode = GET_MODE (rhs);
1456 int bits;
1458 if (lcode == LSHIFTRT)
1459 /* Number of bits not shifted off the end. */
1460 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1461 else /* lcode == ZERO_EXTEND */
1462 /* Size of inner mode. */
1463 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1465 if (rcode == LSHIFTRT)
1466 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1467 else /* rcode == ZERO_EXTEND */
1468 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1470 /* We can only widen multiplies if the result is mathematically
 1471 equivalent, i.e. if overflow was impossible. */
1472 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1473 return simplify_gen_binary
1474 (MULT, mode,
1475 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1476 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1480 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op) == ZERO_EXTEND)
1482 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1483 GET_MODE (XEXP (op, 0)));
1485 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (zero_extend:M (subreg:O <X>)) if there is mode with
1487 GET_MODE_PRECISION (N) - I bits. */
1488 if (GET_CODE (op) == LSHIFTRT
1489 && GET_CODE (XEXP (op, 0)) == ASHIFT
1490 && CONST_INT_P (XEXP (op, 1))
1491 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1492 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1494 machine_mode tmode
1495 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1496 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1497 if (tmode != BLKmode)
1499 rtx inner =
1500 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1501 if (inner)
1502 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1506 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1507 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1508 of mode N. E.g.
1509 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1510 (and:SI (reg:SI) (const_int 63)). */
1511 if (GET_CODE (op) == SUBREG
1512 && GET_MODE_PRECISION (GET_MODE (op))
1513 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1514 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1515 <= HOST_BITS_PER_WIDE_INT
1516 && GET_MODE_PRECISION (mode)
1517 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1518 && subreg_lowpart_p (op)
1519 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1520 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1522 if (GET_MODE_PRECISION (mode)
1523 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1524 return SUBREG_REG (op);
1525 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1526 GET_MODE (SUBREG_REG (op)));
1529 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && POINTERS_EXTEND_UNSIGNED > 0
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1541 return convert_memory_address (Pmode, op);
1542 #endif
1543 break;
1545 default:
1546 break;
1549 return 0;
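/* A few of the non-constant folds handled above, written as rtl templates:
   (not (not x)) -> x, (neg (neg x)) -> x, (not (plus x -1)) -> (neg x),
   and (sign_extend:DI (sign_extend:SI (x:HI))) -> (sign_extend:DI (x:HI)).
   Each replacement is built with simplify_gen_unary / simplify_gen_binary
   so that the new operands get another chance to fold.  */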
1552 /* Try to compute the value of a unary operation CODE whose output mode is to
1553 be MODE with input operand OP whose mode was originally OP_MODE.
1554 Return zero if the value cannot be computed. */
1556 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1557 rtx op, machine_mode op_mode)
1559 unsigned int width = GET_MODE_PRECISION (mode);
1561 if (code == VEC_DUPLICATE)
1563 gcc_assert (VECTOR_MODE_P (mode));
1564 if (GET_MODE (op) != VOIDmode)
1566 if (!VECTOR_MODE_P (GET_MODE (op)))
1567 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1568 else
1569 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1570 (GET_MODE (op)));
1572 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1573 || GET_CODE (op) == CONST_VECTOR)
1575 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1576 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1577 rtvec v = rtvec_alloc (n_elts);
1578 unsigned int i;
1580 if (GET_CODE (op) != CONST_VECTOR)
1581 for (i = 0; i < n_elts; i++)
1582 RTVEC_ELT (v, i) = op;
1583 else
1585 machine_mode inmode = GET_MODE (op);
1586 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1587 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1589 gcc_assert (in_n_elts < n_elts);
1590 gcc_assert ((n_elts % in_n_elts) == 0);
1591 for (i = 0; i < n_elts; i++)
1592 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1594 return gen_rtx_CONST_VECTOR (mode, v);
1598 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1600 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1601 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1602 machine_mode opmode = GET_MODE (op);
1603 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1604 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1608 gcc_assert (op_n_elts == n_elts);
1609 for (i = 0; i < n_elts; i++)
1611 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1612 CONST_VECTOR_ELT (op, i),
1613 GET_MODE_INNER (opmode));
1614 if (!x)
1615 return 0;
1616 RTVEC_ELT (v, i) = x;
1618 return gen_rtx_CONST_VECTOR (mode, v);
1621 /* The order of these tests is critical so that, for example, we don't
1622 check the wrong mode (input vs. output) for a conversion operation,
1623 such as FIX. At some point, this should be simplified. */
1625 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1627 REAL_VALUE_TYPE d;
1629 if (op_mode == VOIDmode)
 1631 /* CONST_INTs have VOIDmode as their mode. We assume that all
 1632 the bits of the constant are significant, though this is
 1633 a dangerous assumption, as CONST_INTs are often
 1634 created and used with garbage in the bits outside of the
 1635 precision of the implied mode of the const_int. */
1636 op_mode = MAX_MODE_INT;
1639 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1640 d = real_value_truncate (mode, d);
1641 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1643 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1645 REAL_VALUE_TYPE d;
1647 if (op_mode == VOIDmode)
 1649 /* CONST_INTs have VOIDmode as their mode. We assume that all
 1650 the bits of the constant are significant, though this is
 1651 a dangerous assumption, as CONST_INTs are often
 1652 created and used with garbage in the bits outside of the
 1653 precision of the implied mode of the const_int. */
1654 op_mode = MAX_MODE_INT;
1657 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1658 d = real_value_truncate (mode, d);
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1662 if (CONST_SCALAR_INT_P (op) && width > 0)
1664 wide_int result;
1665 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1666 rtx_mode_t op0 = std::make_pair (op, imode);
1667 int int_value;
1669 #if TARGET_SUPPORTS_WIDE_INT == 0
 1670 /* This assert keeps the simplification from producing a result
 1671 that cannot be represented in a CONST_DOUBLE, but a lot of
 1672 upstream callers expect that this function never fails to
 1673 simplify something, so if you added this check to the test
 1674 above the code would die later anyway. If this assert
 1675 fires, you just need to make the port support wide int. */
1676 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1677 #endif
1679 switch (code)
1681 case NOT:
1682 result = wi::bit_not (op0);
1683 break;
1685 case NEG:
1686 result = wi::neg (op0);
1687 break;
1689 case ABS:
1690 result = wi::abs (op0);
1691 break;
1693 case FFS:
1694 result = wi::shwi (wi::ffs (op0), mode);
1695 break;
1697 case CLZ:
1698 if (wi::ne_p (op0, 0))
1699 int_value = wi::clz (op0);
1700 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1701 int_value = GET_MODE_PRECISION (mode);
1702 result = wi::shwi (int_value, mode);
1703 break;
1705 case CLRSB:
1706 result = wi::shwi (wi::clrsb (op0), mode);
1707 break;
1709 case CTZ:
1710 if (wi::ne_p (op0, 0))
1711 int_value = wi::ctz (op0);
1712 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1713 int_value = GET_MODE_PRECISION (mode);
1714 result = wi::shwi (int_value, mode);
1715 break;
1717 case POPCOUNT:
1718 result = wi::shwi (wi::popcount (op0), mode);
1719 break;
1721 case PARITY:
1722 result = wi::shwi (wi::parity (op0), mode);
1723 break;
1725 case BSWAP:
1726 result = wide_int (op0).bswap ();
1727 break;
1729 case TRUNCATE:
1730 case ZERO_EXTEND:
1731 result = wide_int::from (op0, width, UNSIGNED);
1732 break;
1734 case SIGN_EXTEND:
1735 result = wide_int::from (op0, width, SIGNED);
1736 break;
1738 case SQRT:
1739 default:
1740 return 0;
1743 return immed_wide_int_const (result, mode);
1746 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1747 && SCALAR_FLOAT_MODE_P (mode)
1748 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1750 REAL_VALUE_TYPE d;
1751 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1753 switch (code)
1755 case SQRT:
1756 return 0;
1757 case ABS:
1758 d = real_value_abs (&d);
1759 break;
1760 case NEG:
1761 d = real_value_negate (&d);
1762 break;
1763 case FLOAT_TRUNCATE:
1764 d = real_value_truncate (mode, d);
1765 break;
1766 case FLOAT_EXTEND:
1767 /* All this does is change the mode, unless changing
1768 mode class. */
1769 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1770 real_convert (&d, mode, &d);
1771 break;
1772 case FIX:
1773 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1774 break;
1775 case NOT:
1777 long tmp[4];
1778 int i;
1780 real_to_target (tmp, &d, GET_MODE (op));
1781 for (i = 0; i < 4; i++)
1782 tmp[i] = ~tmp[i];
1783 real_from_target (&d, tmp, mode);
1784 break;
1786 default:
1787 gcc_unreachable ();
1789 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1791 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1792 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1793 && GET_MODE_CLASS (mode) == MODE_INT
1794 && width > 0)
1796 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1797 operators are intentionally left unspecified (to ease implementation
1798 by target backends), for consistency, this routine implements the
1799 same semantics for constant folding as used by the middle-end. */
1801 /* This was formerly used only for non-IEEE float.
1802 eggert@twinsun.com says it is safe for IEEE also. */
1803 REAL_VALUE_TYPE x, t;
1804 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1805 wide_int wmax, wmin;
 1806 /* This is part of the ABI of real_to_integer, but we check
1807 things before making this call. */
1808 bool fail;
1810 switch (code)
1812 case FIX:
1813 if (REAL_VALUE_ISNAN (x))
1814 return const0_rtx;
1816 /* Test against the signed upper bound. */
1817 wmax = wi::max_value (width, SIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1822 /* Test against the signed lower bound. */
1823 wmin = wi::min_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1825 if (REAL_VALUES_LESS (x, t))
1826 return immed_wide_int_const (wmin, mode);
1828 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1829 break;
1831 case UNSIGNED_FIX:
1832 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1833 return const0_rtx;
1835 /* Test against the unsigned upper bound. */
1836 wmax = wi::max_value (width, UNSIGNED);
1837 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1838 if (REAL_VALUES_LESS (t, x))
1839 return immed_wide_int_const (wmax, mode);
1841 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1842 mode);
1843 break;
1845 default:
1846 gcc_unreachable ();
1850 return NULL_RTX;
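/* Illustrative sketch, not part of simplify-rtx.c: the FIX/UNSIGNED_FIX
   constant folding above saturates -- values above the largest
   representable integer fold to the maximum, values below the minimum fold
   to the minimum, and NaN folds to 0, mirroring the middle-end convention
   the comment mentions.  A minimal host-side model of the signed 32-bit
   case (the helper is hypothetical):  */
#include <limits.h>
#include <math.h>

static int
fold_fix_to_int32 (double d)
{
  if (isnan (d))
    return 0;
  if (d > (double) INT_MAX)
    return INT_MAX;
  if (d < (double) INT_MIN)
    return INT_MIN;
  return (int) d;	/* In range: truncate toward zero.  */
}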
1853 /* Subroutine of simplify_binary_operation to simplify a binary operation
1854 CODE that can commute with byte swapping, with result mode MODE and
1855 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1856 Return zero if no simplification or canonicalization is possible. */
1858 static rtx
1859 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1860 rtx op0, rtx op1)
1862 rtx tem;
1864 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1865 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1868 simplify_gen_unary (BSWAP, mode, op1, mode));
1869 return simplify_gen_unary (BSWAP, mode, tem, mode);
1872 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1873 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1875 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1876 return simplify_gen_unary (BSWAP, mode, tem, mode);
1879 return NULL_RTX;
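/* Illustrative sketch, not part of simplify-rtx.c: the rewrite above is
   valid because a byte swap distributes over the bitwise operators, so
   bswap(x) op c == bswap(x op bswap(c)) for op in {AND, IOR, XOR}.  A
   host-side check using __builtin_bswap32 (the helper name is made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_bswap_commutes (uint32_t x, uint32_t c)
{
  assert ((__builtin_bswap32 (x) & c)
	  == __builtin_bswap32 (x & __builtin_bswap32 (c)));
  assert ((__builtin_bswap32 (x) | c)
	  == __builtin_bswap32 (x | __builtin_bswap32 (c)));
  assert ((__builtin_bswap32 (x) ^ c)
	  == __builtin_bswap32 (x ^ __builtin_bswap32 (c)));
}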
1882 /* Subroutine of simplify_binary_operation to simplify a commutative,
1883 associative binary operation CODE with result mode MODE, operating
1884 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1885 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1886 canonicalization is possible. */
1888 static rtx
1889 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1890 rtx op0, rtx op1)
1892 rtx tem;
1894 /* Linearize the operator to the left. */
1895 if (GET_CODE (op1) == code)
1897 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1898 if (GET_CODE (op0) == code)
1900 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1901 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1904 /* "a op (b op c)" becomes "(b op c) op a". */
1905 if (! swap_commutative_operands_p (op1, op0))
1906 return simplify_gen_binary (code, mode, op1, op0);
1908 std::swap (op0, op1);
1911 if (GET_CODE (op0) == code)
1913 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1914 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1916 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1917 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1920 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1921 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1922 if (tem != 0)
1923 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1925 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1926 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1927 if (tem != 0)
1928 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1931 return 0;
1935 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1936 and OP1. Return 0 if no simplification is possible.
1938 Don't use this for relational operations such as EQ or LT.
1939 Use simplify_relational_operation instead. */
1940 rtx
1941 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1942 rtx op0, rtx op1)
1944 rtx trueop0, trueop1;
1945 rtx tem;
1947 /* Relational operations don't work here. We must know the mode
1948 of the operands in order to do the comparison correctly.
1949 Assuming a full word can give incorrect results.
1950 Consider comparing 128 with -128 in QImode. */
1951 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1952 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1954 /* Make sure the constant is second. */
1955 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1956 && swap_commutative_operands_p (op0, op1))
1957 std::swap (op0, op1);
1959 trueop0 = avoid_constant_pool_reference (op0);
1960 trueop1 = avoid_constant_pool_reference (op1);
1962 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1963 if (tem)
1964 return tem;
1965 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1968 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1969 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1970 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1971 actual constants. */
1973 static rtx
1974 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1975 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1977 rtx tem, reversed, opleft, opright;
1978 HOST_WIDE_INT val;
1979 unsigned int width = GET_MODE_PRECISION (mode);
1981 /* Even if we can't compute a constant result,
1982 there are some cases worth simplifying. */
1984 switch (code)
1986 case PLUS:
1987 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1988 when x is NaN, infinite, or finite and nonzero. They aren't
1989 when x is -0 and the rounding mode is not towards -infinity,
1990 since (-0) + 0 is then 0. */
1991 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1992 return op0;
1994 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1995 transformations are safe even for IEEE. */
1996 if (GET_CODE (op0) == NEG)
1997 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1998 else if (GET_CODE (op1) == NEG)
1999 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2001 /* (~a) + 1 -> -a */
2002 if (INTEGRAL_MODE_P (mode)
2003 && GET_CODE (op0) == NOT
2004 && trueop1 == const1_rtx)
2005 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2007 /* Handle both-operands-constant cases. We can only add
2008 CONST_INTs to constants since the sum of relocatable symbols
2009 can't be handled by most assemblers. Don't add CONST_INT
2010 to CONST_INT since overflow won't be computed properly if wider
2011 than HOST_BITS_PER_WIDE_INT. */
2013 if ((GET_CODE (op0) == CONST
2014 || GET_CODE (op0) == SYMBOL_REF
2015 || GET_CODE (op0) == LABEL_REF)
2016 && CONST_INT_P (op1))
2017 return plus_constant (mode, op0, INTVAL (op1));
2018 else if ((GET_CODE (op1) == CONST
2019 || GET_CODE (op1) == SYMBOL_REF
2020 || GET_CODE (op1) == LABEL_REF)
2021 && CONST_INT_P (op0))
2022 return plus_constant (mode, op1, INTVAL (op0));
2024 /* See if this is something like X * C - X or vice versa or
2025 if the multiplication is written as a shift. If so, we can
2026 distribute and make a new multiply, shift, or maybe just
2027 have X (if C is 2 in the example above). But don't make
2028 something more expensive than we had before. */
2030 if (SCALAR_INT_MODE_P (mode))
2032 rtx lhs = op0, rhs = op1;
2034 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2035 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2037 if (GET_CODE (lhs) == NEG)
2039 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2040 lhs = XEXP (lhs, 0);
2042 else if (GET_CODE (lhs) == MULT
2043 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2045 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2046 lhs = XEXP (lhs, 0);
2048 else if (GET_CODE (lhs) == ASHIFT
2049 && CONST_INT_P (XEXP (lhs, 1))
2050 && INTVAL (XEXP (lhs, 1)) >= 0
2051 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2053 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2054 GET_MODE_PRECISION (mode));
2055 lhs = XEXP (lhs, 0);
2058 if (GET_CODE (rhs) == NEG)
2060 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2061 rhs = XEXP (rhs, 0);
2063 else if (GET_CODE (rhs) == MULT
2064 && CONST_INT_P (XEXP (rhs, 1)))
2066 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2067 rhs = XEXP (rhs, 0);
2069 else if (GET_CODE (rhs) == ASHIFT
2070 && CONST_INT_P (XEXP (rhs, 1))
2071 && INTVAL (XEXP (rhs, 1)) >= 0
2072 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2074 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2075 GET_MODE_PRECISION (mode));
2076 rhs = XEXP (rhs, 0);
2079 if (rtx_equal_p (lhs, rhs))
2081 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2082 rtx coeff;
2083 bool speed = optimize_function_for_speed_p (cfun);
2085 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2087 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2088 return (set_src_cost (tem, mode, speed)
2089 <= set_src_cost (orig, mode, speed) ? tem : 0);
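/* Illustrative sketch, not part of simplify-rtx.c: the coefficient merging
   above relies on x*c1 + x*c2 == x*(c1 + c2) in modular arithmetic, with a
   left shift treated as a multiplication and a negation as a coefficient
   of -1.  Host-side checks on 32-bit unsigned values (the helper name is
   made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_coeff_merge (uint32_t x)
{
  assert (x * 5u + x * 3u == x * 8u);	/* (mult x 5) + (mult x 3) */
  assert (((x << 3) + x) == x * 9u);	/* (ashift x 3) + x */
  assert ((0u - x) + x * 3u == x * 2u);	/* (neg x) + (mult x 3) */
}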
2093 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2094 if (CONST_SCALAR_INT_P (op1)
2095 && GET_CODE (op0) == XOR
2096 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2097 && mode_signbit_p (mode, op1))
2098 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2099 simplify_gen_binary (XOR, mode, op1,
2100 XEXP (op0, 1)));
2102 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2103 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2104 && GET_CODE (op0) == MULT
2105 && GET_CODE (XEXP (op0, 0)) == NEG)
2107 rtx in1, in2;
2109 in1 = XEXP (XEXP (op0, 0), 0);
2110 in2 = XEXP (op0, 1);
2111 return simplify_gen_binary (MINUS, mode, op1,
2112 simplify_gen_binary (MULT, mode,
2113 in1, in2));
2116 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2117 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2118 is 1. */
2119 if (COMPARISON_P (op0)
2120 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2121 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2122 && (reversed = reversed_comparison (op0, mode)))
2123 return
2124 simplify_gen_unary (NEG, mode, reversed, mode);
2126 /* If one of the operands is a PLUS or a MINUS, see if we can
2127 simplify this by the associative law.
2128 Don't use the associative law for floating point.
2129 The inaccuracy makes it nonassociative,
2130 and subtle programs can break if operations are associated. */
2132 if (INTEGRAL_MODE_P (mode)
2133 && (plus_minus_operand_p (op0)
2134 || plus_minus_operand_p (op1))
2135 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2136 return tem;
2138 /* Reassociate floating point addition only when the user
2139 specifies associative math operations. */
2140 if (FLOAT_MODE_P (mode)
2141 && flag_associative_math)
2143 tem = simplify_associative_operation (code, mode, op0, op1);
2144 if (tem)
2145 return tem;
2147 break;
2149 case COMPARE:
2150 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2151 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2152 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2153 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2155 rtx xop00 = XEXP (op0, 0);
2156 rtx xop10 = XEXP (op1, 0);
2158 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2159 return xop00;
2161 if (REG_P (xop00) && REG_P (xop10)
2162 && GET_MODE (xop00) == GET_MODE (xop10)
2163 && REGNO (xop00) == REGNO (xop10)
2164 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2165 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2166 return xop00;
2168 break;
2170 case MINUS:
2171 /* We can't assume x-x is 0 even with non-IEEE floating point,
2172 but since it is zero except in very strange circumstances, we
2173 will treat it as zero with -ffinite-math-only. */
2174 if (rtx_equal_p (trueop0, trueop1)
2175 && ! side_effects_p (op0)
2176 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2177 return CONST0_RTX (mode);
2179 /* Change subtraction from zero into negation. (0 - x) is the
2180 same as -x when x is NaN, infinite, or finite and nonzero.
2181 But if the mode has signed zeros, and does not round towards
2182 -infinity, then 0 - 0 is 0, not -0. */
2183 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2184 return simplify_gen_unary (NEG, mode, op1, mode);
2186 /* (-1 - a) is ~a. */
2187 if (trueop0 == constm1_rtx)
2188 return simplify_gen_unary (NOT, mode, op1, mode);
2190 /* Subtracting 0 has no effect unless the mode has signed zeros
2191 and supports rounding towards -infinity. In such a case,
2192 0 - 0 is -0. */
2193 if (!(HONOR_SIGNED_ZEROS (mode)
2194 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2195 && trueop1 == CONST0_RTX (mode))
2196 return op0;
2198 /* See if this is something like X * C - X or vice versa or
2199 if the multiplication is written as a shift. If so, we can
2200 distribute and make a new multiply, shift, or maybe just
2201 have X (if C is 2 in the example above). But don't make
2202 something more expensive than we had before. */
2204 if (SCALAR_INT_MODE_P (mode))
2206 rtx lhs = op0, rhs = op1;
2208 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2209 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2211 if (GET_CODE (lhs) == NEG)
2213 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2214 lhs = XEXP (lhs, 0);
2216 else if (GET_CODE (lhs) == MULT
2217 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2219 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2220 lhs = XEXP (lhs, 0);
2222 else if (GET_CODE (lhs) == ASHIFT
2223 && CONST_INT_P (XEXP (lhs, 1))
2224 && INTVAL (XEXP (lhs, 1)) >= 0
2225 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2227 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2228 GET_MODE_PRECISION (mode));
2229 lhs = XEXP (lhs, 0);
2232 if (GET_CODE (rhs) == NEG)
2234 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2235 rhs = XEXP (rhs, 0);
2237 else if (GET_CODE (rhs) == MULT
2238 && CONST_INT_P (XEXP (rhs, 1)))
2240 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2241 rhs = XEXP (rhs, 0);
2243 else if (GET_CODE (rhs) == ASHIFT
2244 && CONST_INT_P (XEXP (rhs, 1))
2245 && INTVAL (XEXP (rhs, 1)) >= 0
2246 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2248 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2249 GET_MODE_PRECISION (mode));
2250 negcoeff1 = -negcoeff1;
2251 rhs = XEXP (rhs, 0);
2254 if (rtx_equal_p (lhs, rhs))
2256 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2257 rtx coeff;
2258 bool speed = optimize_function_for_speed_p (cfun);
2260 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2262 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2263 return (set_src_cost (tem, mode, speed)
2264 <= set_src_cost (orig, mode, speed) ? tem : 0);
2268 /* (a - (-b)) -> (a + b). True even for IEEE. */
2269 if (GET_CODE (op1) == NEG)
2270 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2272 /* (-x - c) may be simplified as (-c - x). */
2273 if (GET_CODE (op0) == NEG
2274 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2276 tem = simplify_unary_operation (NEG, mode, op1, mode);
2277 if (tem)
2278 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2281 /* Don't let a relocatable value get a negative coeff. */
2282 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2283 return simplify_gen_binary (PLUS, mode,
2284 op0,
2285 neg_const_int (mode, op1));
2287 /* (x - (x & y)) -> (x & ~y) */
2288 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2290 if (rtx_equal_p (op0, XEXP (op1, 0)))
2292 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2293 GET_MODE (XEXP (op1, 1)));
2294 return simplify_gen_binary (AND, mode, op0, tem);
2296 if (rtx_equal_p (op0, XEXP (op1, 1)))
2298 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2299 GET_MODE (XEXP (op1, 0)));
2300 return simplify_gen_binary (AND, mode, op0, tem);
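/* Illustrative sketch, not part of simplify-rtx.c: (x - (x & y)) equals
   (x & ~y) because the subtrahend only removes bits that are set in x, so
   no borrows can occur.  Host-side check (the helper name is made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_sub_of_and (uint32_t x, uint32_t y)
{
  assert (x - (x & y) == (x & ~y));
}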
2304 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2305 by reversing the comparison code if valid. */
2306 if (STORE_FLAG_VALUE == 1
2307 && trueop0 == const1_rtx
2308 && COMPARISON_P (op1)
2309 && (reversed = reversed_comparison (op1, mode)))
2310 return reversed;
2312 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2313 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2314 && GET_CODE (op1) == MULT
2315 && GET_CODE (XEXP (op1, 0)) == NEG)
2317 rtx in1, in2;
2319 in1 = XEXP (XEXP (op1, 0), 0);
2320 in2 = XEXP (op1, 1);
2321 return simplify_gen_binary (PLUS, mode,
2322 simplify_gen_binary (MULT, mode,
2323 in1, in2),
2324 op0);
2327 /* Canonicalize (minus (neg A) (mult B C)) to
2328 (minus (mult (neg B) C) A). */
2329 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2330 && GET_CODE (op1) == MULT
2331 && GET_CODE (op0) == NEG)
2333 rtx in1, in2;
2335 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2336 in2 = XEXP (op1, 1);
2337 return simplify_gen_binary (MINUS, mode,
2338 simplify_gen_binary (MULT, mode,
2339 in1, in2),
2340 XEXP (op0, 0));
2343 /* If one of the operands is a PLUS or a MINUS, see if we can
2344 simplify this by the associative law. This will, for example,
2345 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2346 Don't use the associative law for floating point.
2347 The inaccuracy makes it nonassociative,
2348 and subtle programs can break if operations are associated. */
2350 if (INTEGRAL_MODE_P (mode)
2351 && (plus_minus_operand_p (op0)
2352 || plus_minus_operand_p (op1))
2353 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2354 return tem;
2355 break;
2357 case MULT:
2358 if (trueop1 == constm1_rtx)
2359 return simplify_gen_unary (NEG, mode, op0, mode);
2361 if (GET_CODE (op0) == NEG)
2363 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2364 /* If op1 is a MULT as well and simplify_unary_operation
2365 just moved the NEG to the second operand, simplify_gen_binary
2366 below could, through simplify_associative_operation, move
2367 the NEG around again and recurse endlessly. */
2368 if (temp
2369 && GET_CODE (op1) == MULT
2370 && GET_CODE (temp) == MULT
2371 && XEXP (op1, 0) == XEXP (temp, 0)
2372 && GET_CODE (XEXP (temp, 1)) == NEG
2373 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2374 temp = NULL_RTX;
2375 if (temp)
2376 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2378 if (GET_CODE (op1) == NEG)
2380 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2381 /* If op0 is a MULT as well and simplify_unary_operation
2382 just moved the NEG to the second operand, simplify_gen_binary
2383 below could, through simplify_associative_operation, move
2384 the NEG around again and recurse endlessly. */
2385 if (temp
2386 && GET_CODE (op0) == MULT
2387 && GET_CODE (temp) == MULT
2388 && XEXP (op0, 0) == XEXP (temp, 0)
2389 && GET_CODE (XEXP (temp, 1)) == NEG
2390 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2391 temp = NULL_RTX;
2392 if (temp)
2393 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2396 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2397 x is NaN, since x * 0 is then also NaN. Nor is it valid
2398 when the mode has signed zeros, since multiplying a negative
2399 number by 0 will give -0, not 0. */
2400 if (!HONOR_NANS (mode)
2401 && !HONOR_SIGNED_ZEROS (mode)
2402 && trueop1 == CONST0_RTX (mode)
2403 && ! side_effects_p (op0))
2404 return op1;
2406 /* In IEEE floating point, x*1 is not equivalent to x for
2407 signalling NaNs. */
2408 if (!HONOR_SNANS (mode)
2409 && trueop1 == CONST1_RTX (mode))
2410 return op0;
2412 /* Convert multiply by constant power of two into shift. */
2413 if (CONST_SCALAR_INT_P (trueop1))
2415 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2416 if (val >= 0)
2417 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
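/* Illustrative sketch, not part of simplify-rtx.c: when the constant is a
   power of two, the multiplication above becomes a left shift by its exact
   log2, e.g. 16 == 1 << 4.  Host-side check (the helper name is made
   up):  */
#include <assert.h>
#include <stdint.h>

static void
check_mult_pow2_as_shift (uint32_t x)
{
  assert (x * 16u == (x << 4));
}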
2420 /* x*2 is x+x and x*(-1) is -x */
2421 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2422 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2423 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2424 && GET_MODE (op0) == mode)
2426 REAL_VALUE_TYPE d;
2427 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2429 if (REAL_VALUES_EQUAL (d, dconst2))
2430 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2432 if (!HONOR_SNANS (mode)
2433 && REAL_VALUES_EQUAL (d, dconstm1))
2434 return simplify_gen_unary (NEG, mode, op0, mode);
2437 /* Optimize -x * -x as x * x. */
2438 if (FLOAT_MODE_P (mode)
2439 && GET_CODE (op0) == NEG
2440 && GET_CODE (op1) == NEG
2441 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2442 && !side_effects_p (XEXP (op0, 0)))
2443 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2445 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2446 if (SCALAR_FLOAT_MODE_P (mode)
2447 && GET_CODE (op0) == ABS
2448 && GET_CODE (op1) == ABS
2449 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2450 && !side_effects_p (XEXP (op0, 0)))
2451 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
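/* Illustrative sketch, not part of simplify-rtx.c: negating both factors,
   or taking the absolute value of both factors, leaves a product unchanged,
   which is what the two transformations above exploit.  Host-side check;
   NaN is excluded only because NaN never compares equal to itself (the
   helper name is made up):  */
#include <assert.h>
#include <math.h>

static void
check_square_identities (double x)
{
  if (!isnan (x))
    {
      assert (-x * -x == x * x);
      assert (fabs (x) * fabs (x) == x * x);
    }
}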
2453 /* Reassociate multiplication, but for floating point MULTs
2454 only when the user specifies unsafe math optimizations. */
2455 if (! FLOAT_MODE_P (mode)
2456 || flag_unsafe_math_optimizations)
2458 tem = simplify_associative_operation (code, mode, op0, op1);
2459 if (tem)
2460 return tem;
2462 break;
2464 case IOR:
2465 if (trueop1 == CONST0_RTX (mode))
2466 return op0;
2467 if (INTEGRAL_MODE_P (mode)
2468 && trueop1 == CONSTM1_RTX (mode)
2469 && !side_effects_p (op0))
2470 return op1;
2471 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2472 return op0;
2473 /* A | (~A) -> -1 */
2474 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2475 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2476 && ! side_effects_p (op0)
2477 && SCALAR_INT_MODE_P (mode))
2478 return constm1_rtx;
2480 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2481 if (CONST_INT_P (op1)
2482 && HWI_COMPUTABLE_MODE_P (mode)
2483 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2484 && !side_effects_p (op0))
2485 return op1;
2487 /* Canonicalize (X & C1) | C2. */
2488 if (GET_CODE (op0) == AND
2489 && CONST_INT_P (trueop1)
2490 && CONST_INT_P (XEXP (op0, 1)))
2492 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2493 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2494 HOST_WIDE_INT c2 = INTVAL (trueop1);
2496 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2497 if ((c1 & c2) == c1
2498 && !side_effects_p (XEXP (op0, 0)))
2499 return trueop1;
2501 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2502 if (((c1|c2) & mask) == mask)
2503 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2505 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2506 if (((c1 & ~c2) & mask) != (c1 & mask))
2508 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2509 gen_int_mode (c1 & ~c2, mode));
2510 return simplify_gen_binary (IOR, mode, tem, op1);
2514 /* Convert (A & B) | A to A. */
2515 if (GET_CODE (op0) == AND
2516 && (rtx_equal_p (XEXP (op0, 0), op1)
2517 || rtx_equal_p (XEXP (op0, 1), op1))
2518 && ! side_effects_p (XEXP (op0, 0))
2519 && ! side_effects_p (XEXP (op0, 1)))
2520 return op1;
2522 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2523 mode size to (rotate A CX). */
2525 if (GET_CODE (op1) == ASHIFT
2526 || GET_CODE (op1) == SUBREG)
2528 opleft = op1;
2529 opright = op0;
2531 else
2533 opright = op1;
2534 opleft = op0;
2537 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2538 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2539 && CONST_INT_P (XEXP (opleft, 1))
2540 && CONST_INT_P (XEXP (opright, 1))
2541 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2542 == GET_MODE_PRECISION (mode)))
2543 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
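/* Illustrative sketch, not part of simplify-rtx.c: when the two shift
   counts sum to the mode width, the IOR of a left shift and a logical
   right shift of the same value is a rotation, which is the pattern the
   code above recognizes.  A 32-bit rotate-left-by-7 written in that style
   (the helper name is made up):  */
#include <stdint.h>

static uint32_t
rot_left_7 (uint32_t x)
{
  return (x << 7) | (x >> 25);	/* 7 + 25 == 32, so this is a rotate.  */
}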
2545 /* Same, but for ashift that has been "simplified" to a wider mode
2546 by simplify_shift_const. */
2548 if (GET_CODE (opleft) == SUBREG
2549 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2550 && GET_CODE (opright) == LSHIFTRT
2551 && GET_CODE (XEXP (opright, 0)) == SUBREG
2552 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2553 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2554 && (GET_MODE_SIZE (GET_MODE (opleft))
2555 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2556 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2557 SUBREG_REG (XEXP (opright, 0)))
2558 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2559 && CONST_INT_P (XEXP (opright, 1))
2560 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2561 == GET_MODE_PRECISION (mode)))
2562 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2563 XEXP (SUBREG_REG (opleft), 1));
2565 /* If we have (ior (and X C1) C2), simplify this by making
2566 C1 as small as possible if C1 actually changes. */
2567 if (CONST_INT_P (op1)
2568 && (HWI_COMPUTABLE_MODE_P (mode)
2569 || INTVAL (op1) > 0)
2570 && GET_CODE (op0) == AND
2571 && CONST_INT_P (XEXP (op0, 1))
2572 && CONST_INT_P (op1)
2573 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2575 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2576 gen_int_mode (UINTVAL (XEXP (op0, 1))
2577 & ~UINTVAL (op1),
2578 mode));
2579 return simplify_gen_binary (IOR, mode, tmp, op1);
2582 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2583 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2584 the PLUS does not affect any of the bits in OP1: then we can do
2585 the IOR as a PLUS and we can associate. This is valid if OP1
2586 can be safely shifted left C bits. */
2587 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2588 && GET_CODE (XEXP (op0, 0)) == PLUS
2589 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2590 && CONST_INT_P (XEXP (op0, 1))
2591 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2593 int count = INTVAL (XEXP (op0, 1));
2594 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2596 if (mask >> count == INTVAL (trueop1)
2597 && trunc_int_for_mode (mask, mode) == mask
2598 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2599 return simplify_gen_binary (ASHIFTRT, mode,
2600 plus_constant (mode, XEXP (op0, 0),
2601 mask),
2602 XEXP (op0, 1));
2605 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2606 if (tem)
2607 return tem;
2609 tem = simplify_associative_operation (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2612 break;
2614 case XOR:
2615 if (trueop1 == CONST0_RTX (mode))
2616 return op0;
2617 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2618 return simplify_gen_unary (NOT, mode, op0, mode);
2619 if (rtx_equal_p (trueop0, trueop1)
2620 && ! side_effects_p (op0)
2621 && GET_MODE_CLASS (mode) != MODE_CC)
2622 return CONST0_RTX (mode);
2624 /* Canonicalize XOR of the most significant bit to PLUS. */
2625 if (CONST_SCALAR_INT_P (op1)
2626 && mode_signbit_p (mode, op1))
2627 return simplify_gen_binary (PLUS, mode, op0, op1);
2628 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2629 if (CONST_SCALAR_INT_P (op1)
2630 && GET_CODE (op0) == PLUS
2631 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2632 && mode_signbit_p (mode, XEXP (op0, 1)))
2633 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2634 simplify_gen_binary (XOR, mode, op1,
2635 XEXP (op0, 1)));
2637 /* If we are XORing two things that have no bits in common,
2638 convert them into an IOR. This helps to detect rotations that were
2639 encoded as XOR and may enable other simplifications. */
2641 if (HWI_COMPUTABLE_MODE_P (mode)
2642 && (nonzero_bits (op0, mode)
2643 & nonzero_bits (op1, mode)) == 0)
2644 return (simplify_gen_binary (IOR, mode, op0, op1));
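/* Illustrative sketch, not part of simplify-rtx.c: when two values share no
   set bits, XOR and IOR agree, which is why the conversion above is safe.
   Host-side check (the helper name is made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_disjoint_xor_is_ior (uint32_t a, uint32_t b)
{
  if ((a & b) == 0)
    assert ((a ^ b) == (a | b));
}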
2646 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2647 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2648 (NOT y). */
2650 int num_negated = 0;
2652 if (GET_CODE (op0) == NOT)
2653 num_negated++, op0 = XEXP (op0, 0);
2654 if (GET_CODE (op1) == NOT)
2655 num_negated++, op1 = XEXP (op1, 0);
2657 if (num_negated == 2)
2658 return simplify_gen_binary (XOR, mode, op0, op1);
2659 else if (num_negated == 1)
2660 return simplify_gen_unary (NOT, mode,
2661 simplify_gen_binary (XOR, mode, op0, op1),
2662 mode);
2665 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2666 correspond to a machine insn or result in further simplifications
2667 if B is a constant. */
2669 if (GET_CODE (op0) == AND
2670 && rtx_equal_p (XEXP (op0, 1), op1)
2671 && ! side_effects_p (op1))
2672 return simplify_gen_binary (AND, mode,
2673 simplify_gen_unary (NOT, mode,
2674 XEXP (op0, 0), mode),
2675 op1);
2677 else if (GET_CODE (op0) == AND
2678 && rtx_equal_p (XEXP (op0, 0), op1)
2679 && ! side_effects_p (op1))
2680 return simplify_gen_binary (AND, mode,
2681 simplify_gen_unary (NOT, mode,
2682 XEXP (op0, 1), mode),
2683 op1);
2685 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2686 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2687 out bits inverted twice and not set by C. Similarly, given
2688 (xor (and (xor A B) C) D), simplify without inverting C in
2689 the xor operand: (xor (and A C) (B&C)^D). */
2691 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2692 && GET_CODE (XEXP (op0, 0)) == XOR
2693 && CONST_INT_P (op1)
2694 && CONST_INT_P (XEXP (op0, 1))
2695 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2697 enum rtx_code op = GET_CODE (op0);
2698 rtx a = XEXP (XEXP (op0, 0), 0);
2699 rtx b = XEXP (XEXP (op0, 0), 1);
2700 rtx c = XEXP (op0, 1);
2701 rtx d = op1;
2702 HOST_WIDE_INT bval = INTVAL (b);
2703 HOST_WIDE_INT cval = INTVAL (c);
2704 HOST_WIDE_INT dval = INTVAL (d);
2705 HOST_WIDE_INT xcval;
2707 if (op == IOR)
2708 xcval = ~cval;
2709 else
2710 xcval = cval;
2712 return simplify_gen_binary (XOR, mode,
2713 simplify_gen_binary (op, mode, a, c),
2714 gen_int_mode ((bval & xcval) ^ dval,
2715 mode));
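/* Illustrative sketch, not part of simplify-rtx.c: the rewrites above rest
   on the bitwise identities
     ((a ^ b) | c) ^ d == (a | c) ^ ((b & ~c) ^ d)
     ((a ^ b) & c) ^ d == (a & c) ^ ((b & c) ^ d)
   Host-side check (the helper name is made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_xor_through_ior_and (uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
  assert ((((a ^ b) | c) ^ d) == ((a | c) ^ ((b & ~c) ^ d)));
  assert ((((a ^ b) & c) ^ d) == ((a & c) ^ ((b & c) ^ d)));
}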
2718 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2719 we can transform like this:
2720 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2721 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2722 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2723 Attempt a few simplifications when B and C are both constants. */
2724 if (GET_CODE (op0) == AND
2725 && CONST_INT_P (op1)
2726 && CONST_INT_P (XEXP (op0, 1)))
2728 rtx a = XEXP (op0, 0);
2729 rtx b = XEXP (op0, 1);
2730 rtx c = op1;
2731 HOST_WIDE_INT bval = INTVAL (b);
2732 HOST_WIDE_INT cval = INTVAL (c);
2734 /* Instead of computing ~A&C, we compute its negation,
2735 A|~C. If that yields -1, ~A&C is zero, so we can
2736 optimize for sure. If it does not simplify, we still try
2737 to compute ~A&C below, but since that always allocates
2738 RTL, we don't try that before committing to returning a
2739 simplified expression. */
2740 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2741 GEN_INT (~cval));
2743 if ((~cval & bval) == 0)
2745 rtx na_c = NULL_RTX;
2746 if (n_na_c)
2747 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2748 else
2750 /* If ~A does not simplify, don't bother: we don't
2751 want to simplify 2 operations into 3, and if na_c
2752 were to simplify with na, n_na_c would have
2753 simplified as well. */
2754 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2755 if (na)
2756 na_c = simplify_gen_binary (AND, mode, na, c);
2759 /* Try to simplify ~A&C | ~B&C. */
2760 if (na_c != NULL_RTX)
2761 return simplify_gen_binary (IOR, mode, na_c,
2762 gen_int_mode (~bval & cval, mode));
2764 else
2766 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2767 if (n_na_c == CONSTM1_RTX (mode))
2769 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2770 gen_int_mode (~cval & bval,
2771 mode));
2772 return simplify_gen_binary (IOR, mode, a_nc_b,
2773 gen_int_mode (~bval & cval,
2774 mode));
2779 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2780 comparison if STORE_FLAG_VALUE is 1. */
2781 if (STORE_FLAG_VALUE == 1
2782 && trueop1 == const1_rtx
2783 && COMPARISON_P (op0)
2784 && (reversed = reversed_comparison (op0, mode)))
2785 return reversed;
2787 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2788 is (lt foo (const_int 0)), so we can perform the above
2789 simplification if STORE_FLAG_VALUE is 1. */
2791 if (STORE_FLAG_VALUE == 1
2792 && trueop1 == const1_rtx
2793 && GET_CODE (op0) == LSHIFTRT
2794 && CONST_INT_P (XEXP (op0, 1))
2795 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2796 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
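/* Illustrative sketch, not part of simplify-rtx.c: logically shifting a
   value right by width-1 leaves just its sign bit, i.e. the value of
   x < 0, so XORing that single bit with 1 yields x >= 0 -- the reversal
   performed above.  Host-side check for 32 bits (the helper name is made
   up):  */
#include <assert.h>
#include <stdint.h>

static void
check_sign_bit_shift (int32_t x)
{
  assert (((uint32_t) x >> 31) == (uint32_t) (x < 0));
  assert ((((uint32_t) x >> 31) ^ 1u) == (uint32_t) (x >= 0));
}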
2798 /* (xor (comparison foo bar) (const_int sign-bit))
2799 when STORE_FLAG_VALUE is the sign bit. */
2800 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2801 && trueop1 == const_true_rtx
2802 && COMPARISON_P (op0)
2803 && (reversed = reversed_comparison (op0, mode)))
2804 return reversed;
2806 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2807 if (tem)
2808 return tem;
2810 tem = simplify_associative_operation (code, mode, op0, op1);
2811 if (tem)
2812 return tem;
2813 break;
2815 case AND:
2816 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2817 return trueop1;
2818 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2819 return op0;
2820 if (HWI_COMPUTABLE_MODE_P (mode))
2822 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2823 HOST_WIDE_INT nzop1;
2824 if (CONST_INT_P (trueop1))
2826 HOST_WIDE_INT val1 = INTVAL (trueop1);
2827 /* If we are turning off bits already known off in OP0, we need
2828 not do an AND. */
2829 if ((nzop0 & ~val1) == 0)
2830 return op0;
2832 nzop1 = nonzero_bits (trueop1, mode);
2833 /* If we are clearing all the nonzero bits, the result is zero. */
2834 if ((nzop1 & nzop0) == 0
2835 && !side_effects_p (op0) && !side_effects_p (op1))
2836 return CONST0_RTX (mode);
2838 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2839 && GET_MODE_CLASS (mode) != MODE_CC)
2840 return op0;
2841 /* A & (~A) -> 0 */
2842 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2843 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2844 && ! side_effects_p (op0)
2845 && GET_MODE_CLASS (mode) != MODE_CC)
2846 return CONST0_RTX (mode);
2848 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2849 there are no nonzero bits of C outside of X's mode. */
2850 if ((GET_CODE (op0) == SIGN_EXTEND
2851 || GET_CODE (op0) == ZERO_EXTEND)
2852 && CONST_INT_P (trueop1)
2853 && HWI_COMPUTABLE_MODE_P (mode)
2854 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2855 & UINTVAL (trueop1)) == 0)
2857 machine_mode imode = GET_MODE (XEXP (op0, 0));
2858 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2859 gen_int_mode (INTVAL (trueop1),
2860 imode));
2861 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2864 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2865 we might be able to further simplify the AND with X and potentially
2866 remove the truncation altogether. */
2867 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2869 rtx x = XEXP (op0, 0);
2870 machine_mode xmode = GET_MODE (x);
2871 tem = simplify_gen_binary (AND, xmode, x,
2872 gen_int_mode (INTVAL (trueop1), xmode));
2873 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2876 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2877 if (GET_CODE (op0) == IOR
2878 && CONST_INT_P (trueop1)
2879 && CONST_INT_P (XEXP (op0, 1)))
2881 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2882 return simplify_gen_binary (IOR, mode,
2883 simplify_gen_binary (AND, mode,
2884 XEXP (op0, 0), op1),
2885 gen_int_mode (tmp, mode));
2888 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2889 insn (and may simplify more). */
2890 if (GET_CODE (op0) == XOR
2891 && rtx_equal_p (XEXP (op0, 0), op1)
2892 && ! side_effects_p (op1))
2893 return simplify_gen_binary (AND, mode,
2894 simplify_gen_unary (NOT, mode,
2895 XEXP (op0, 1), mode),
2896 op1);
2898 if (GET_CODE (op0) == XOR
2899 && rtx_equal_p (XEXP (op0, 1), op1)
2900 && ! side_effects_p (op1))
2901 return simplify_gen_binary (AND, mode,
2902 simplify_gen_unary (NOT, mode,
2903 XEXP (op0, 0), mode),
2904 op1);
2906 /* Similarly for (~(A ^ B)) & A. */
2907 if (GET_CODE (op0) == NOT
2908 && GET_CODE (XEXP (op0, 0)) == XOR
2909 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2910 && ! side_effects_p (op1))
2911 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2913 if (GET_CODE (op0) == NOT
2914 && GET_CODE (XEXP (op0, 0)) == XOR
2915 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2916 && ! side_effects_p (op1))
2917 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2919 /* Convert (A | B) & A to A. */
2920 if (GET_CODE (op0) == IOR
2921 && (rtx_equal_p (XEXP (op0, 0), op1)
2922 || rtx_equal_p (XEXP (op0, 1), op1))
2923 && ! side_effects_p (XEXP (op0, 0))
2924 && ! side_effects_p (XEXP (op0, 1)))
2925 return op1;
2927 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2928 ((A & N) + B) & M -> (A + B) & M
2929 Similarly if (N & M) == 0,
2930 ((A | N) + B) & M -> (A + B) & M
2931 and for - instead of + and/or ^ instead of |.
2932 Also, if (N & M) == 0, then
2933 (A +- N) & M -> A & M. */
2934 if (CONST_INT_P (trueop1)
2935 && HWI_COMPUTABLE_MODE_P (mode)
2936 && ~UINTVAL (trueop1)
2937 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2938 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2940 rtx pmop[2];
2941 int which;
2943 pmop[0] = XEXP (op0, 0);
2944 pmop[1] = XEXP (op0, 1);
2946 if (CONST_INT_P (pmop[1])
2947 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2948 return simplify_gen_binary (AND, mode, pmop[0], op1);
2950 for (which = 0; which < 2; which++)
2952 tem = pmop[which];
2953 switch (GET_CODE (tem))
2955 case AND:
2956 if (CONST_INT_P (XEXP (tem, 1))
2957 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2958 == UINTVAL (trueop1))
2959 pmop[which] = XEXP (tem, 0);
2960 break;
2961 case IOR:
2962 case XOR:
2963 if (CONST_INT_P (XEXP (tem, 1))
2964 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2965 pmop[which] = XEXP (tem, 0);
2966 break;
2967 default:
2968 break;
2972 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2974 tem = simplify_gen_binary (GET_CODE (op0), mode,
2975 pmop[0], pmop[1]);
2976 return simplify_gen_binary (code, mode, tem, op1);
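/* Illustrative sketch, not part of simplify-rtx.c: with M a low-bit mask,
   carries in an addition only propagate upward, so if (N & M) == M then
   ((A & N) + B) & M == (A + B) & M, and if (N & M) == 0 then
   ((A | N) + B) & M == (A + B) & M, as the comment above states.
   Host-side check with M == 0xff (constants and helper are made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_masked_plus (uint32_t a, uint32_t b)
{
  const uint32_t m = 0xffu;
  const uint32_t n1 = 0xabcdefffu;	/* (n1 & m) == m */
  const uint32_t n2 = 0x12345600u;	/* (n2 & m) == 0 */
  assert ((((a & n1) + b) & m) == ((a + b) & m));
  assert ((((a | n2) + b) & m) == ((a + b) & m));
}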
2981 /* (and X (ior (not X) Y)) -> (and X Y) */
2981 if (GET_CODE (op1) == IOR
2982 && GET_CODE (XEXP (op1, 0)) == NOT
2983 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2984 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2986 /* (and (ior (not X) Y) X) -> (and X Y) */
2987 if (GET_CODE (op0) == IOR
2988 && GET_CODE (XEXP (op0, 0)) == NOT
2989 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
2990 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2992 /* (and X (ior Y (not X))) -> (and X Y) */
2993 if (GET_CODE (op1) == IOR
2994 && GET_CODE (XEXP (op1, 1)) == NOT
2995 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
2996 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
2998 /* (and (ior Y (not X)) X) -> (and X Y) */
2999 if (GET_CODE (op0) == IOR
3000 && GET_CODE (XEXP (op0, 1)) == NOT
3001 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3002 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3004 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3005 if (tem)
3006 return tem;
3008 tem = simplify_associative_operation (code, mode, op0, op1);
3009 if (tem)
3010 return tem;
3011 break;
3013 case UDIV:
3014 /* 0/x is 0 (or x&0 if x has side-effects). */
3015 if (trueop0 == CONST0_RTX (mode))
3017 if (side_effects_p (op1))
3018 return simplify_gen_binary (AND, mode, op1, trueop0);
3019 return trueop0;
3021 /* x/1 is x. */
3022 if (trueop1 == CONST1_RTX (mode))
3024 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3025 if (tem)
3026 return tem;
3028 /* Convert divide by power of two into shift. */
3029 if (CONST_INT_P (trueop1)
3030 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3031 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3032 break;
3034 case DIV:
3035 /* Handle floating point and integers separately. */
3036 if (SCALAR_FLOAT_MODE_P (mode))
3038 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3039 safe for modes with NaNs, since 0.0 / 0.0 will then be
3040 NaN rather than 0.0. Nor is it safe for modes with signed
3041 zeros, since dividing 0 by a negative number gives -0.0. */
3042 if (trueop0 == CONST0_RTX (mode)
3043 && !HONOR_NANS (mode)
3044 && !HONOR_SIGNED_ZEROS (mode)
3045 && ! side_effects_p (op1))
3046 return op0;
3047 /* x/1.0 is x. */
3048 if (trueop1 == CONST1_RTX (mode)
3049 && !HONOR_SNANS (mode))
3050 return op0;
3052 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3053 && trueop1 != CONST0_RTX (mode))
3055 REAL_VALUE_TYPE d;
3056 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3058 /* x/-1.0 is -x. */
3059 if (REAL_VALUES_EQUAL (d, dconstm1)
3060 && !HONOR_SNANS (mode))
3061 return simplify_gen_unary (NEG, mode, op0, mode);
3063 /* Change FP division by a constant into multiplication.
3064 Only do this with -freciprocal-math. */
3065 if (flag_reciprocal_math
3066 && !REAL_VALUES_EQUAL (d, dconst0))
3068 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3069 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3070 return simplify_gen_binary (MULT, mode, op0, tem);
3074 else if (SCALAR_INT_MODE_P (mode))
3076 /* 0/x is 0 (or x&0 if x has side-effects). */
3077 if (trueop0 == CONST0_RTX (mode)
3078 && !cfun->can_throw_non_call_exceptions)
3080 if (side_effects_p (op1))
3081 return simplify_gen_binary (AND, mode, op1, trueop0);
3082 return trueop0;
3084 /* x/1 is x. */
3085 if (trueop1 == CONST1_RTX (mode))
3087 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3088 if (tem)
3089 return tem;
3091 /* x/-1 is -x. */
3092 if (trueop1 == constm1_rtx)
3094 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3095 if (x)
3096 return simplify_gen_unary (NEG, mode, x, mode);
3099 break;
3101 case UMOD:
3102 /* 0%x is 0 (or x&0 if x has side-effects). */
3103 if (trueop0 == CONST0_RTX (mode))
3105 if (side_effects_p (op1))
3106 return simplify_gen_binary (AND, mode, op1, trueop0);
3107 return trueop0;
3109 /* x%1 is 0 (or x&0 if x has side-effects). */
3110 if (trueop1 == CONST1_RTX (mode))
3112 if (side_effects_p (op0))
3113 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3114 return CONST0_RTX (mode);
3116 /* Implement modulus by power of two as AND. */
3117 if (CONST_INT_P (trueop1)
3118 && exact_log2 (UINTVAL (trueop1)) > 0)
3119 return simplify_gen_binary (AND, mode, op0,
3120 gen_int_mode (INTVAL (op1) - 1, mode));
3121 break;
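/* Illustrative sketch, not part of simplify-rtx.c: for unsigned values,
   modulus by a power of two is an AND with that power of two minus one
   (the transformation above), and the UDIV case earlier turns the
   corresponding division into a logical right shift.  Host-side check
   (the helper name is made up):  */
#include <assert.h>
#include <stdint.h>

static void
check_unsigned_pow2_divmod (uint32_t x)
{
  assert (x % 8u == (x & 7u));
  assert (x / 8u == (x >> 3));
}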
3123 case MOD:
3124 /* 0%x is 0 (or x&0 if x has side-effects). */
3125 if (trueop0 == CONST0_RTX (mode))
3127 if (side_effects_p (op1))
3128 return simplify_gen_binary (AND, mode, op1, trueop0);
3129 return trueop0;
3131 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3132 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3134 if (side_effects_p (op0))
3135 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3136 return CONST0_RTX (mode);
3138 break;
3140 case ROTATERT:
3141 case ROTATE:
3142 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3143 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3144 bitsize - 1, use the other rotate direction with an amount of
3145 1 .. bitsize / 2 - 1 instead. */
3146 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3147 if (CONST_INT_P (trueop1)
3148 && IN_RANGE (INTVAL (trueop1),
3149 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3150 GET_MODE_PRECISION (mode) - 1))
3151 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3152 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3153 - INTVAL (trueop1)));
3154 #endif
3155 /* FALLTHRU */
3156 case ASHIFTRT:
3157 if (trueop1 == CONST0_RTX (mode))
3158 return op0;
3159 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3160 return op0;
3161 /* Rotating ~0 always results in ~0. */
3162 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3163 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3164 && ! side_effects_p (op1))
3165 return op0;
3166 /* Given:
3167 scalar modes M1, M2
3168 scalar constants c1, c2
3169 size (M2) > size (M1)
3170 c1 == size (M2) - size (M1)
3171 optimize:
3172 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3173 <low_part>)
3174 (const_int <c2>))
3175 to:
3176 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3177 <low_part>). */
3178 if (code == ASHIFTRT
3179 && !VECTOR_MODE_P (mode)
3180 && SUBREG_P (op0)
3181 && CONST_INT_P (op1)
3182 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3183 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3184 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3185 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3186 > GET_MODE_BITSIZE (mode))
3187 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3188 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3189 - GET_MODE_BITSIZE (mode)))
3190 && subreg_lowpart_p (op0))
3192 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3193 + INTVAL (op1));
3194 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3195 tmp = simplify_gen_binary (ASHIFTRT,
3196 GET_MODE (SUBREG_REG (op0)),
3197 XEXP (SUBREG_REG (op0), 0),
3198 tmp);
3199 return simplify_gen_subreg (mode, tmp, inner_mode,
3200 subreg_lowpart_offset (mode,
3201 inner_mode));
3203 canonicalize_shift:
3204 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3206 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3207 if (val != INTVAL (op1))
3208 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3210 break;
3212 case ASHIFT:
3213 case SS_ASHIFT:
3214 case US_ASHIFT:
3215 if (trueop1 == CONST0_RTX (mode))
3216 return op0;
3217 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3218 return op0;
3219 goto canonicalize_shift;
3221 case LSHIFTRT:
3222 if (trueop1 == CONST0_RTX (mode))
3223 return op0;
3224 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3225 return op0;
3226 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3227 if (GET_CODE (op0) == CLZ
3228 && CONST_INT_P (trueop1)
3229 && STORE_FLAG_VALUE == 1
3230 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3232 machine_mode imode = GET_MODE (XEXP (op0, 0));
3233 unsigned HOST_WIDE_INT zero_val = 0;
3235 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3236 && zero_val == GET_MODE_PRECISION (imode)
3237 && INTVAL (trueop1) == exact_log2 (zero_val))
3238 return simplify_gen_relational (EQ, mode, imode,
3239 XEXP (op0, 0), const0_rtx);
3241 goto canonicalize_shift;
3243 case SMIN:
3244 if (width <= HOST_BITS_PER_WIDE_INT
3245 && mode_signbit_p (mode, trueop1)
3246 && ! side_effects_p (op0))
3247 return op1;
3248 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3249 return op0;
3250 tem = simplify_associative_operation (code, mode, op0, op1);
3251 if (tem)
3252 return tem;
3253 break;
3255 case SMAX:
3256 if (width <= HOST_BITS_PER_WIDE_INT
3257 && CONST_INT_P (trueop1)
3258 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3259 && ! side_effects_p (op0))
3260 return op1;
3261 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3262 return op0;
3263 tem = simplify_associative_operation (code, mode, op0, op1);
3264 if (tem)
3265 return tem;
3266 break;
3268 case UMIN:
3269 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3270 return op1;
3271 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3272 return op0;
3273 tem = simplify_associative_operation (code, mode, op0, op1);
3274 if (tem)
3275 return tem;
3276 break;
3278 case UMAX:
3279 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3280 return op1;
3281 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3282 return op0;
3283 tem = simplify_associative_operation (code, mode, op0, op1);
3284 if (tem)
3285 return tem;
3286 break;
3288 case SS_PLUS:
3289 case US_PLUS:
3290 case SS_MINUS:
3291 case US_MINUS:
3292 case SS_MULT:
3293 case US_MULT:
3294 case SS_DIV:
3295 case US_DIV:
3296 /* ??? There are simplifications that can be done. */
3297 return 0;
3299 case VEC_SELECT:
3300 if (!VECTOR_MODE_P (mode))
3302 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3303 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3304 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3305 gcc_assert (XVECLEN (trueop1, 0) == 1);
3306 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3308 if (GET_CODE (trueop0) == CONST_VECTOR)
3309 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3310 (trueop1, 0, 0)));
3312 /* Extract a scalar element from a nested VEC_SELECT expression
3313 (with optional nested VEC_CONCAT expression). Some targets
3314 (i386) extract a scalar element from a vector using a chain of
3315 nested VEC_SELECT expressions. When the input operand is a memory
3316 operand, this operation can be simplified to a simple scalar
3317 load from a suitably offset memory address. */
3318 if (GET_CODE (trueop0) == VEC_SELECT)
3320 rtx op0 = XEXP (trueop0, 0);
3321 rtx op1 = XEXP (trueop0, 1);
3323 machine_mode opmode = GET_MODE (op0);
3324 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3325 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3327 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3328 int elem;
3330 rtvec vec;
3331 rtx tmp_op, tmp;
3333 gcc_assert (GET_CODE (op1) == PARALLEL);
3334 gcc_assert (i < n_elts);
3336 /* Select the element pointed to by the nested selector. */
3337 elem = INTVAL (XVECEXP (op1, 0, i));
3339 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3340 if (GET_CODE (op0) == VEC_CONCAT)
3342 rtx op00 = XEXP (op0, 0);
3343 rtx op01 = XEXP (op0, 1);
3345 machine_mode mode00, mode01;
3346 int n_elts00, n_elts01;
3348 mode00 = GET_MODE (op00);
3349 mode01 = GET_MODE (op01);
3351 /* Find out number of elements of each operand. */
3352 if (VECTOR_MODE_P (mode00))
3354 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3355 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3357 else
3358 n_elts00 = 1;
3360 if (VECTOR_MODE_P (mode01))
3362 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3363 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3365 else
3366 n_elts01 = 1;
3368 gcc_assert (n_elts == n_elts00 + n_elts01);
3370 /* Select correct operand of VEC_CONCAT
3371 and adjust selector. */
3372 if (elem < n_elts01)
3373 tmp_op = op00;
3374 else
3376 tmp_op = op01;
3377 elem -= n_elts00;
3380 else
3381 tmp_op = op0;
3383 vec = rtvec_alloc (1);
3384 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3386 tmp = gen_rtx_fmt_ee (code, mode,
3387 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3388 return tmp;
3390 if (GET_CODE (trueop0) == VEC_DUPLICATE
3391 && GET_MODE (XEXP (trueop0, 0)) == mode)
3392 return XEXP (trueop0, 0);
3394 else
3396 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3397 gcc_assert (GET_MODE_INNER (mode)
3398 == GET_MODE_INNER (GET_MODE (trueop0)));
3399 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3401 if (GET_CODE (trueop0) == CONST_VECTOR)
3403 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3404 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3405 rtvec v = rtvec_alloc (n_elts);
3406 unsigned int i;
3408 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3409 for (i = 0; i < n_elts; i++)
3411 rtx x = XVECEXP (trueop1, 0, i);
3413 gcc_assert (CONST_INT_P (x));
3414 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3415 INTVAL (x));
3418 return gen_rtx_CONST_VECTOR (mode, v);
3421 /* Recognize the identity. */
3422 if (GET_MODE (trueop0) == mode)
3424 bool maybe_ident = true;
3425 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3427 rtx j = XVECEXP (trueop1, 0, i);
3428 if (!CONST_INT_P (j) || INTVAL (j) != i)
3430 maybe_ident = false;
3431 break;
3434 if (maybe_ident)
3435 return trueop0;
3438 /* If we build {a,b} then permute it, build the result directly. */
3439 if (XVECLEN (trueop1, 0) == 2
3440 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3441 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3442 && GET_CODE (trueop0) == VEC_CONCAT
3443 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3444 && GET_MODE (XEXP (trueop0, 0)) == mode
3445 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3446 && GET_MODE (XEXP (trueop0, 1)) == mode)
3448 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3449 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3450 rtx subop0, subop1;
3452 gcc_assert (i0 < 4 && i1 < 4);
3453 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3454 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3456 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3459 if (XVECLEN (trueop1, 0) == 2
3460 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3461 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3462 && GET_CODE (trueop0) == VEC_CONCAT
3463 && GET_MODE (trueop0) == mode)
3465 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3466 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3467 rtx subop0, subop1;
3469 gcc_assert (i0 < 2 && i1 < 2);
3470 subop0 = XEXP (trueop0, i0);
3471 subop1 = XEXP (trueop0, i1);
3473 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3476 /* If we select one half of a vec_concat, return that. */
3477 if (GET_CODE (trueop0) == VEC_CONCAT
3478 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3480 rtx subop0 = XEXP (trueop0, 0);
3481 rtx subop1 = XEXP (trueop0, 1);
3482 machine_mode mode0 = GET_MODE (subop0);
3483 machine_mode mode1 = GET_MODE (subop1);
3484 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3485 int l0 = GET_MODE_SIZE (mode0) / li;
3486 int l1 = GET_MODE_SIZE (mode1) / li;
3487 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3488 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3490 bool success = true;
3491 for (int i = 1; i < l0; ++i)
3493 rtx j = XVECEXP (trueop1, 0, i);
3494 if (!CONST_INT_P (j) || INTVAL (j) != i)
3496 success = false;
3497 break;
3500 if (success)
3501 return subop0;
3503 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3505 bool success = true;
3506 for (int i = 1; i < l1; ++i)
3508 rtx j = XVECEXP (trueop1, 0, i);
3509 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3511 success = false;
3512 break;
3515 if (success)
3516 return subop1;
3521 if (XVECLEN (trueop1, 0) == 1
3522 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3523 && GET_CODE (trueop0) == VEC_CONCAT)
3525 rtx vec = trueop0;
3526 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3528 /* Try to find the element in the VEC_CONCAT. */
3529 while (GET_MODE (vec) != mode
3530 && GET_CODE (vec) == VEC_CONCAT)
3532 HOST_WIDE_INT vec_size;
3534 if (CONST_INT_P (XEXP (vec, 0)))
3536 /* vec_concat of two const_ints doesn't make sense with
3537 respect to modes. */
3538 if (CONST_INT_P (XEXP (vec, 1)))
3539 return 0;
3541 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3542 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3544 else
3545 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3547 if (offset < vec_size)
3548 vec = XEXP (vec, 0);
3549 else
3551 offset -= vec_size;
3552 vec = XEXP (vec, 1);
3554 vec = avoid_constant_pool_reference (vec);
3557 if (GET_MODE (vec) == mode)
3558 return vec;
3561 /* If we select elements in a vec_merge that all come from the same
3562 operand, select from that operand directly. */
3563 if (GET_CODE (op0) == VEC_MERGE)
3565 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3566 if (CONST_INT_P (trueop02))
3568 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3569 bool all_operand0 = true;
3570 bool all_operand1 = true;
3571 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3573 rtx j = XVECEXP (trueop1, 0, i);
3574 if (sel & (1 << UINTVAL (j)))
3575 all_operand1 = false;
3576 else
3577 all_operand0 = false;
3579 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3580 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3581 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3582 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3586 /* If we have two nested selects that are inverses of each
3587 other, replace them with the source operand. */
3588 if (GET_CODE (trueop0) == VEC_SELECT
3589 && GET_MODE (XEXP (trueop0, 0)) == mode)
3591 rtx op0_subop1 = XEXP (trueop0, 1);
3592 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3593 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3595 /* Apply the outer ordering vector to the inner one. (The inner
3596 ordering vector is expressly permitted to be of a different
3597 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3598 then the two VEC_SELECTs cancel. */
3599 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3601 rtx x = XVECEXP (trueop1, 0, i);
3602 if (!CONST_INT_P (x))
3603 return 0;
3604 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3605 if (!CONST_INT_P (y) || i != INTVAL (y))
3606 return 0;
3608 return XEXP (trueop0, 0);
3611 return 0;
3612 case VEC_CONCAT:
3614 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3615 ? GET_MODE (trueop0)
3616 : GET_MODE_INNER (mode));
3617 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3618 ? GET_MODE (trueop1)
3619 : GET_MODE_INNER (mode));
3621 gcc_assert (VECTOR_MODE_P (mode));
3622 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3623 == GET_MODE_SIZE (mode));
3625 if (VECTOR_MODE_P (op0_mode))
3626 gcc_assert (GET_MODE_INNER (mode)
3627 == GET_MODE_INNER (op0_mode));
3628 else
3629 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3631 if (VECTOR_MODE_P (op1_mode))
3632 gcc_assert (GET_MODE_INNER (mode)
3633 == GET_MODE_INNER (op1_mode));
3634 else
3635 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3637 if ((GET_CODE (trueop0) == CONST_VECTOR
3638 || CONST_SCALAR_INT_P (trueop0)
3639 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3640 && (GET_CODE (trueop1) == CONST_VECTOR
3641 || CONST_SCALAR_INT_P (trueop1)
3642 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3644 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3645 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3646 rtvec v = rtvec_alloc (n_elts);
3647 unsigned int i;
3648 unsigned in_n_elts = 1;
3650 if (VECTOR_MODE_P (op0_mode))
3651 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3652 for (i = 0; i < n_elts; i++)
3654 if (i < in_n_elts)
3656 if (!VECTOR_MODE_P (op0_mode))
3657 RTVEC_ELT (v, i) = trueop0;
3658 else
3659 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3661 else
3663 if (!VECTOR_MODE_P (op1_mode))
3664 RTVEC_ELT (v, i) = trueop1;
3665 else
3666 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3667 i - in_n_elts);
3671 return gen_rtx_CONST_VECTOR (mode, v);
3674 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3675 Restrict the transformation to avoid generating a VEC_SELECT with a
3676 mode unrelated to its operand. */
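/* Illustrative example (modes assumed): with A in V4SImode,
     (vec_concat:V4SI (vec_select:V2SI A (parallel [0 1]))
                      (vec_select:V2SI A (parallel [3 2])))
   becomes (vec_select:V4SI A (parallel [0 1 3 2])).  */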
3677 if (GET_CODE (trueop0) == VEC_SELECT
3678 && GET_CODE (trueop1) == VEC_SELECT
3679 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3680 && GET_MODE (XEXP (trueop0, 0)) == mode)
3682 rtx par0 = XEXP (trueop0, 1);
3683 rtx par1 = XEXP (trueop1, 1);
3684 int len0 = XVECLEN (par0, 0);
3685 int len1 = XVECLEN (par1, 0);
3686 rtvec vec = rtvec_alloc (len0 + len1);
3687 for (int i = 0; i < len0; i++)
3688 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3689 for (int i = 0; i < len1; i++)
3690 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3691 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3692 gen_rtx_PARALLEL (VOIDmode, vec));
3695 return 0;
3697 default:
3698 gcc_unreachable ();
3701 return 0;
3705 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3706 rtx op0, rtx op1)
3708 unsigned int width = GET_MODE_PRECISION (mode);
3710 if (VECTOR_MODE_P (mode)
3711 && code != VEC_CONCAT
3712 && GET_CODE (op0) == CONST_VECTOR
3713 && GET_CODE (op1) == CONST_VECTOR)
3715 unsigned n_elts = GET_MODE_NUNITS (mode);
3716 machine_mode op0mode = GET_MODE (op0);
3717 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3718 machine_mode op1mode = GET_MODE (op1);
3719 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3720 rtvec v = rtvec_alloc (n_elts);
3721 unsigned int i;
3723 gcc_assert (op0_n_elts == n_elts);
3724 gcc_assert (op1_n_elts == n_elts);
3725 for (i = 0; i < n_elts; i++)
3727 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3728 CONST_VECTOR_ELT (op0, i),
3729 CONST_VECTOR_ELT (op1, i));
3730 if (!x)
3731 return 0;
3732 RTVEC_ELT (v, i) = x;
3735 return gen_rtx_CONST_VECTOR (mode, v);
3738 if (VECTOR_MODE_P (mode)
3739 && code == VEC_CONCAT
3740 && (CONST_SCALAR_INT_P (op0)
3741 || GET_CODE (op0) == CONST_FIXED
3742 || CONST_DOUBLE_AS_FLOAT_P (op0))
3743 && (CONST_SCALAR_INT_P (op1)
3744 || CONST_DOUBLE_AS_FLOAT_P (op1)
3745 || GET_CODE (op1) == CONST_FIXED))
3747 unsigned n_elts = GET_MODE_NUNITS (mode);
3748 rtvec v = rtvec_alloc (n_elts);
3750 gcc_assert (n_elts >= 2);
3751 if (n_elts == 2)
3753 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3754 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3756 RTVEC_ELT (v, 0) = op0;
3757 RTVEC_ELT (v, 1) = op1;
3759 else
3761 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3762 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3763 unsigned i;
3765 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3766 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3767 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3769 for (i = 0; i < op0_n_elts; ++i)
3770 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3771 for (i = 0; i < op1_n_elts; ++i)
3772 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3775 return gen_rtx_CONST_VECTOR (mode, v);
3778 if (SCALAR_FLOAT_MODE_P (mode)
3779 && CONST_DOUBLE_AS_FLOAT_P (op0)
3780 && CONST_DOUBLE_AS_FLOAT_P (op1)
3781 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3783 if (code == AND
3784 || code == IOR
3785 || code == XOR)
3787 long tmp0[4];
3788 long tmp1[4];
3789 REAL_VALUE_TYPE r;
3790 int i;
3792 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3793 GET_MODE (op0));
3794 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3795 GET_MODE (op1));
3796 for (i = 0; i < 4; i++)
3798 switch (code)
3800 case AND:
3801 tmp0[i] &= tmp1[i];
3802 break;
3803 case IOR:
3804 tmp0[i] |= tmp1[i];
3805 break;
3806 case XOR:
3807 tmp0[i] ^= tmp1[i];
3808 break;
3809 default:
3810 gcc_unreachable ();
3813 real_from_target (&r, tmp0, mode);
3814 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3816 else
3818 REAL_VALUE_TYPE f0, f1, value, result;
3819 bool inexact;
3821 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3822 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3823 real_convert (&f0, mode, &f0);
3824 real_convert (&f1, mode, &f1);
3826 if (HONOR_SNANS (mode)
3827 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3828 return 0;
3830 if (code == DIV
3831 && REAL_VALUES_EQUAL (f1, dconst0)
3832 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3833 return 0;
3835 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3836 && flag_trapping_math
3837 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3839 int s0 = REAL_VALUE_NEGATIVE (f0);
3840 int s1 = REAL_VALUE_NEGATIVE (f1);
3842 switch (code)
3844 case PLUS:
3845 /* Inf + -Inf = NaN plus exception. */
3846 if (s0 != s1)
3847 return 0;
3848 break;
3849 case MINUS:
3850 /* Inf - Inf = NaN plus exception. */
3851 if (s0 == s1)
3852 return 0;
3853 break;
3854 case DIV:
3855 /* Inf / Inf = NaN plus exception. */
3856 return 0;
3857 default:
3858 break;
3862 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3863 && flag_trapping_math
3864 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3865 || (REAL_VALUE_ISINF (f1)
3866 && REAL_VALUES_EQUAL (f0, dconst0))))
3867 /* Inf * 0 = NaN plus exception. */
3868 return 0;
3870 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3871 &f0, &f1);
3872 real_convert (&result, mode, &value);
3874 /* Don't constant fold this floating point operation if
3875 the result has overflowed and flag_trapping_math is set. */
3877 if (flag_trapping_math
3878 && MODE_HAS_INFINITIES (mode)
3879 && REAL_VALUE_ISINF (result)
3880 && !REAL_VALUE_ISINF (f0)
3881 && !REAL_VALUE_ISINF (f1))
3882 /* Overflow plus exception. */
3883 return 0;
3885 /* Don't constant fold this floating point operation if the
3886 result may depend upon the run-time rounding mode and
3887 flag_rounding_math is set, or if GCC's software emulation
3888 is unable to accurately represent the result. */
3890 if ((flag_rounding_math
3891 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3892 && (inexact || !real_identical (&result, &value)))
3893 return NULL_RTX;
3895 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3899 /* We can fold some multi-word operations. */
3900 if ((GET_MODE_CLASS (mode) == MODE_INT
3901 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3902 && CONST_SCALAR_INT_P (op0)
3903 && CONST_SCALAR_INT_P (op1))
3905 wide_int result;
3906 bool overflow;
3907 rtx_mode_t pop0 = std::make_pair (op0, mode);
3908 rtx_mode_t pop1 = std::make_pair (op1, mode);
3910 #if TARGET_SUPPORTS_WIDE_INT == 0
3911 /* This assert keeps the simplification from producing a result
3912 that cannot be represented in a CONST_DOUBLE, but a lot of
3913 upstream callers expect that this function never fails to
3914 simplify something, so if you added this to the test
3915 above the code would die later anyway. If this assert
3916 triggers, you just need to make the port support wide int. */
3917 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3918 #endif
3919 switch (code)
3921 case MINUS:
3922 result = wi::sub (pop0, pop1);
3923 break;
3925 case PLUS:
3926 result = wi::add (pop0, pop1);
3927 break;
3929 case MULT:
3930 result = wi::mul (pop0, pop1);
3931 break;
3933 case DIV:
3934 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3935 if (overflow)
3936 return NULL_RTX;
3937 break;
3939 case MOD:
3940 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3941 if (overflow)
3942 return NULL_RTX;
3943 break;
3945 case UDIV:
3946 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3947 if (overflow)
3948 return NULL_RTX;
3949 break;
3951 case UMOD:
3952 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3953 if (overflow)
3954 return NULL_RTX;
3955 break;
3957 case AND:
3958 result = wi::bit_and (pop0, pop1);
3959 break;
3961 case IOR:
3962 result = wi::bit_or (pop0, pop1);
3963 break;
3965 case XOR:
3966 result = wi::bit_xor (pop0, pop1);
3967 break;
3969 case SMIN:
3970 result = wi::smin (pop0, pop1);
3971 break;
3973 case SMAX:
3974 result = wi::smax (pop0, pop1);
3975 break;
3977 case UMIN:
3978 result = wi::umin (pop0, pop1);
3979 break;
3981 case UMAX:
3982 result = wi::umax (pop0, pop1);
3983 break;
3985 case LSHIFTRT:
3986 case ASHIFTRT:
3987 case ASHIFT:
3989 wide_int wop1 = pop1;
3990 if (SHIFT_COUNT_TRUNCATED)
3991 wop1 = wi::umod_trunc (wop1, width);
3992 else if (wi::geu_p (wop1, width))
3993 return NULL_RTX;
3995 switch (code)
3997 case LSHIFTRT:
3998 result = wi::lrshift (pop0, wop1);
3999 break;
4001 case ASHIFTRT:
4002 result = wi::arshift (pop0, wop1);
4003 break;
4005 case ASHIFT:
4006 result = wi::lshift (pop0, wop1);
4007 break;
4009 default:
4010 gcc_unreachable ();
4012 break;
4014 case ROTATE:
4015 case ROTATERT:
4017 if (wi::neg_p (pop1))
4018 return NULL_RTX;
4020 switch (code)
4022 case ROTATE:
4023 result = wi::lrotate (pop0, pop1);
4024 break;
4026 case ROTATERT:
4027 result = wi::rrotate (pop0, pop1);
4028 break;
4030 default:
4031 gcc_unreachable ();
4033 break;
4035 default:
4036 return NULL_RTX;
4038 return immed_wide_int_const (result, mode);
4041 return NULL_RTX;
4046 /* Return a positive integer if X should sort after Y. The value
4047 returned is 1 if and only if X and Y are both regs. */
4049 static int
4050 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4052 int result;
4054 result = (commutative_operand_precedence (y)
4055 - commutative_operand_precedence (x));
4056 if (result)
4057 return result + result;
4059 /* Group together equal REGs to do more simplification. */
4060 if (REG_P (x) && REG_P (y))
4061 return REGNO (x) > REGNO (y);
4063 return 0;
4066 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4067 operands may be another PLUS or MINUS.
4069 Rather than test for specific cases, we do this by a brute-force method
4070 and do all possible simplifications until no more changes occur. Then
4071 we rebuild the operation.
4073 May return NULL_RTX when no changes were made. */
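/* A worked example (illustrative, not from the sources; A, B and C are
   assumed to be pseudo registers): for (minus (plus A B) (plus B C))
   the operands are flattened into the list { +A, +B, -B, -C }, the
   pair +B/-B cancels in the pairwise simplification loop, and the
   expression is rebuilt as (minus A C).  */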
4075 static rtx
4076 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4077 rtx op1)
4079 struct simplify_plus_minus_op_data
4081 rtx op;
4082 short neg;
4083 } ops[16];
4084 rtx result, tem;
4085 int n_ops = 2;
4086 int changed, n_constants, canonicalized = 0;
4087 int i, j;
4089 memset (ops, 0, sizeof ops);
4091 /* Set up the two operands and then expand them until nothing has been
4092 changed. If we run out of room in our array, give up; this should
4093 almost never happen. */
4095 ops[0].op = op0;
4096 ops[0].neg = 0;
4097 ops[1].op = op1;
4098 ops[1].neg = (code == MINUS);
4102 changed = 0;
4103 n_constants = 0;
4105 for (i = 0; i < n_ops; i++)
4107 rtx this_op = ops[i].op;
4108 int this_neg = ops[i].neg;
4109 enum rtx_code this_code = GET_CODE (this_op);
4111 switch (this_code)
4113 case PLUS:
4114 case MINUS:
4115 if (n_ops == ARRAY_SIZE (ops))
4116 return NULL_RTX;
4118 ops[n_ops].op = XEXP (this_op, 1);
4119 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4120 n_ops++;
4122 ops[i].op = XEXP (this_op, 0);
4123 changed = 1;
4124 /* If this operand was negated then we will potentially
4125 canonicalize the expression. Similarly if we don't
4126 place the operands adjacent we're re-ordering the
4127 expression and thus might be performing a
4128 canonicalization. Ignore register re-ordering.
4129 ??? It might be better to shuffle the ops array here,
4130 but then (plus (plus (A, B), plus (C, D))) wouldn't
4131 be seen as non-canonical. */
4132 if (this_neg
4133 || (i != n_ops - 2
4134 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4135 canonicalized = 1;
4136 break;
4138 case NEG:
4139 ops[i].op = XEXP (this_op, 0);
4140 ops[i].neg = ! this_neg;
4141 changed = 1;
4142 canonicalized = 1;
4143 break;
4145 case CONST:
4146 if (n_ops != ARRAY_SIZE (ops)
4147 && GET_CODE (XEXP (this_op, 0)) == PLUS
4148 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4149 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4151 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4152 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4153 ops[n_ops].neg = this_neg;
4154 n_ops++;
4155 changed = 1;
4156 canonicalized = 1;
4158 break;
4160 case NOT:
4161 /* ~a -> (-a - 1) */
4162 if (n_ops != ARRAY_SIZE (ops))
4164 ops[n_ops].op = CONSTM1_RTX (mode);
4165 ops[n_ops++].neg = this_neg;
4166 ops[i].op = XEXP (this_op, 0);
4167 ops[i].neg = !this_neg;
4168 changed = 1;
4169 canonicalized = 1;
4171 break;
4173 case CONST_INT:
4174 n_constants++;
4175 if (this_neg)
4177 ops[i].op = neg_const_int (mode, this_op);
4178 ops[i].neg = 0;
4179 changed = 1;
4180 canonicalized = 1;
4182 break;
4184 default:
4185 break;
4189 while (changed);
4191 if (n_constants > 1)
4192 canonicalized = 1;
4194 gcc_assert (n_ops >= 2);
4196 /* If we only have two operands, we can avoid the loops. */
4197 if (n_ops == 2)
4199 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4200 rtx lhs, rhs;
4202 /* Get the two operands. Be careful with the order, especially for
4203 the cases where code == MINUS. */
4204 if (ops[0].neg && ops[1].neg)
4206 lhs = gen_rtx_NEG (mode, ops[0].op);
4207 rhs = ops[1].op;
4209 else if (ops[0].neg)
4211 lhs = ops[1].op;
4212 rhs = ops[0].op;
4214 else
4216 lhs = ops[0].op;
4217 rhs = ops[1].op;
4220 return simplify_const_binary_operation (code, mode, lhs, rhs);
4223 /* Now simplify each pair of operands until nothing changes. */
4224 while (1)
4226 /* Insertion sort is good enough for a small array. */
4227 for (i = 1; i < n_ops; i++)
4229 struct simplify_plus_minus_op_data save;
4230 int cmp;
4232 j = i - 1;
4233 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4234 if (cmp <= 0)
4235 continue;
4236 /* Just swapping registers doesn't count as canonicalization. */
4237 if (cmp != 1)
4238 canonicalized = 1;
4240 save = ops[i];
4242 ops[j + 1] = ops[j];
4243 while (j--
4244 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4245 ops[j + 1] = save;
4248 changed = 0;
4249 for (i = n_ops - 1; i > 0; i--)
4250 for (j = i - 1; j >= 0; j--)
4252 rtx lhs = ops[j].op, rhs = ops[i].op;
4253 int lneg = ops[j].neg, rneg = ops[i].neg;
4255 if (lhs != 0 && rhs != 0)
4257 enum rtx_code ncode = PLUS;
4259 if (lneg != rneg)
4261 ncode = MINUS;
4262 if (lneg)
4263 std::swap (lhs, rhs);
4265 else if (swap_commutative_operands_p (lhs, rhs))
4266 std::swap (lhs, rhs);
4268 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4269 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4271 rtx tem_lhs, tem_rhs;
4273 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4274 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4275 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4276 tem_rhs);
4278 if (tem && !CONSTANT_P (tem))
4279 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4281 else
4282 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4284 if (tem)
4286 /* Reject "simplifications" that just wrap the two
4287 arguments in a CONST. Failure to do so can result
4288 in infinite recursion with simplify_binary_operation
4289 when it calls us to simplify CONST operations.
4290 Also, if we find such a simplification, don't try
4291 any more combinations with this rhs: We must have
4292 something like symbol+offset, i.e. one of the
4293 trivial CONST expressions we handle later. */
4294 if (GET_CODE (tem) == CONST
4295 && GET_CODE (XEXP (tem, 0)) == ncode
4296 && XEXP (XEXP (tem, 0), 0) == lhs
4297 && XEXP (XEXP (tem, 0), 1) == rhs)
4298 break;
4299 lneg &= rneg;
4300 if (GET_CODE (tem) == NEG)
4301 tem = XEXP (tem, 0), lneg = !lneg;
4302 if (CONST_INT_P (tem) && lneg)
4303 tem = neg_const_int (mode, tem), lneg = 0;
4305 ops[i].op = tem;
4306 ops[i].neg = lneg;
4307 ops[j].op = NULL_RTX;
4308 changed = 1;
4309 canonicalized = 1;
4314 if (!changed)
4315 break;
4317 /* Pack all the operands to the lower-numbered entries. */
4318 for (i = 0, j = 0; j < n_ops; j++)
4319 if (ops[j].op)
4321 ops[i] = ops[j];
4322 i++;
4324 n_ops = i;
4327 /* If nothing changed, fail. */
4328 if (!canonicalized)
4329 return NULL_RTX;
4331 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4332 if (n_ops == 2
4333 && CONST_INT_P (ops[1].op)
4334 && CONSTANT_P (ops[0].op)
4335 && ops[0].neg)
4336 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4338 /* We suppressed creation of trivial CONST expressions in the
4339 combination loop to avoid recursion. Create one manually now.
4340 The combination loop should have ensured that there is exactly
4341 one CONST_INT, and the sort will have ensured that it is last
4342 in the array and that any other constant will be next-to-last. */
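/* For instance (illustrative): with the operand list
   { (reg R), (symbol_ref S), (const_int 8) } the symbol and the
   integer are folded here into (const (plus S 8)), leaving
   { (reg R), (const (plus S 8)) } to be chained together below.  */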
4344 if (n_ops > 1
4345 && CONST_INT_P (ops[n_ops - 1].op)
4346 && CONSTANT_P (ops[n_ops - 2].op))
4348 rtx value = ops[n_ops - 1].op;
4349 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4350 value = neg_const_int (mode, value);
4351 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4352 INTVAL (value));
4353 n_ops--;
4356 /* Put a non-negated operand first, if possible. */
4358 for (i = 0; i < n_ops && ops[i].neg; i++)
4359 continue;
4360 if (i == n_ops)
4361 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4362 else if (i != 0)
4364 tem = ops[0].op;
4365 ops[0] = ops[i];
4366 ops[i].op = tem;
4367 ops[i].neg = 1;
4370 /* Now make the result by performing the requested operations. */
4371 result = ops[0].op;
4372 for (i = 1; i < n_ops; i++)
4373 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4374 mode, result, ops[i].op);
4376 return result;
4379 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4380 static bool
4381 plus_minus_operand_p (const_rtx x)
4383 return GET_CODE (x) == PLUS
4384 || GET_CODE (x) == MINUS
4385 || (GET_CODE (x) == CONST
4386 && GET_CODE (XEXP (x, 0)) == PLUS
4387 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4388 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4391 /* Like simplify_binary_operation except used for relational operators.
4392 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4393 not both be VOIDmode.
4395 CMP_MODE specifies the mode in which the comparison is done, so it is
4396 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4397 the operands or, if both are VOIDmode, the operands are compared in
4398 "infinite precision". */
4400 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4401 machine_mode cmp_mode, rtx op0, rtx op1)
4403 rtx tem, trueop0, trueop1;
4405 if (cmp_mode == VOIDmode)
4406 cmp_mode = GET_MODE (op0);
4407 if (cmp_mode == VOIDmode)
4408 cmp_mode = GET_MODE (op1);
4410 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4411 if (tem)
4413 if (SCALAR_FLOAT_MODE_P (mode))
4415 if (tem == const0_rtx)
4416 return CONST0_RTX (mode);
4417 #ifdef FLOAT_STORE_FLAG_VALUE
4419 REAL_VALUE_TYPE val;
4420 val = FLOAT_STORE_FLAG_VALUE (mode);
4421 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4423 #else
4424 return NULL_RTX;
4425 #endif
4427 if (VECTOR_MODE_P (mode))
4429 if (tem == const0_rtx)
4430 return CONST0_RTX (mode);
4431 #ifdef VECTOR_STORE_FLAG_VALUE
4433 int i, units;
4434 rtvec v;
4436 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4437 if (val == NULL_RTX)
4438 return NULL_RTX;
4439 if (val == const1_rtx)
4440 return CONST1_RTX (mode);
4442 units = GET_MODE_NUNITS (mode);
4443 v = rtvec_alloc (units);
4444 for (i = 0; i < units; i++)
4445 RTVEC_ELT (v, i) = val;
4446 return gen_rtx_raw_CONST_VECTOR (mode, v);
4448 #else
4449 return NULL_RTX;
4450 #endif
4453 return tem;
4456 /* For the following tests, ensure const0_rtx is op1. */
4457 if (swap_commutative_operands_p (op0, op1)
4458 || (op0 == const0_rtx && op1 != const0_rtx))
4459 std::swap (op0, op1), code = swap_condition (code);
4461 /* If op0 is a compare, extract the comparison arguments from it. */
4462 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4463 return simplify_gen_relational (code, mode, VOIDmode,
4464 XEXP (op0, 0), XEXP (op0, 1));
4466 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4467 || CC0_P (op0))
4468 return NULL_RTX;
4470 trueop0 = avoid_constant_pool_reference (op0);
4471 trueop1 = avoid_constant_pool_reference (op1);
4472 return simplify_relational_operation_1 (code, mode, cmp_mode,
4473 trueop0, trueop1);
4476 /* This part of simplify_relational_operation is only used when CMP_MODE
4477 is not in class MODE_CC (i.e. it is a real comparison).
4479 MODE is the mode of the result, while CMP_MODE specifies the mode
4480 in which the comparison is done, so it is the mode of the operands. */
4482 static rtx
4483 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4484 machine_mode cmp_mode, rtx op0, rtx op1)
4486 enum rtx_code op0code = GET_CODE (op0);
4488 if (op1 == const0_rtx && COMPARISON_P (op0))
4490 /* If op0 is a comparison, extract the comparison arguments
4491 from it. */
4492 if (code == NE)
4494 if (GET_MODE (op0) == mode)
4495 return simplify_rtx (op0);
4496 else
4497 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4498 XEXP (op0, 0), XEXP (op0, 1));
4500 else if (code == EQ)
4502 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4503 if (new_code != UNKNOWN)
4504 return simplify_gen_relational (new_code, mode, VOIDmode,
4505 XEXP (op0, 0), XEXP (op0, 1));
4509 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4510 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4511 if ((code == LTU || code == GEU)
4512 && GET_CODE (op0) == PLUS
4513 && CONST_INT_P (XEXP (op0, 1))
4514 && (rtx_equal_p (op1, XEXP (op0, 0))
4515 || rtx_equal_p (op1, XEXP (op0, 1)))
4516 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4517 && XEXP (op0, 1) != const0_rtx)
4519 rtx new_cmp
4520 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4521 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4522 cmp_mode, XEXP (op0, 0), new_cmp);
4525 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4526 if ((code == LTU || code == GEU)
4527 && GET_CODE (op0) == PLUS
4528 && rtx_equal_p (op1, XEXP (op0, 1))
4529 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4530 && !rtx_equal_p (op1, XEXP (op0, 0)))
4531 return simplify_gen_relational (code, mode, cmp_mode, op0,
4532 copy_rtx (XEXP (op0, 0)));
4534 if (op1 == const0_rtx)
4536 /* Canonicalize (GTU x 0) as (NE x 0). */
4537 if (code == GTU)
4538 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4539 /* Canonicalize (LEU x 0) as (EQ x 0). */
4540 if (code == LEU)
4541 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4543 else if (op1 == const1_rtx)
4545 switch (code)
4547 case GE:
4548 /* Canonicalize (GE x 1) as (GT x 0). */
4549 return simplify_gen_relational (GT, mode, cmp_mode,
4550 op0, const0_rtx);
4551 case GEU:
4552 /* Canonicalize (GEU x 1) as (NE x 0). */
4553 return simplify_gen_relational (NE, mode, cmp_mode,
4554 op0, const0_rtx);
4555 case LT:
4556 /* Canonicalize (LT x 1) as (LE x 0). */
4557 return simplify_gen_relational (LE, mode, cmp_mode,
4558 op0, const0_rtx);
4559 case LTU:
4560 /* Canonicalize (LTU x 1) as (EQ x 0). */
4561 return simplify_gen_relational (EQ, mode, cmp_mode,
4562 op0, const0_rtx);
4563 default:
4564 break;
4567 else if (op1 == constm1_rtx)
4569 /* Canonicalize (LE x -1) as (LT x 0). */
4570 if (code == LE)
4571 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4572 /* Canonicalize (GT x -1) as (GE x 0). */
4573 if (code == GT)
4574 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4577 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4578 if ((code == EQ || code == NE)
4579 && (op0code == PLUS || op0code == MINUS)
4580 && CONSTANT_P (op1)
4581 && CONSTANT_P (XEXP (op0, 1))
4582 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4584 rtx x = XEXP (op0, 0);
4585 rtx c = XEXP (op0, 1);
4586 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4587 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4589 /* Detect an infinite recursion, where we would oscillate at this
4590 simplification case between:
4591 A + B == C <---> C - B == A,
4592 where A, B, and C are all constants with non-simplifiable expressions,
4593 usually SYMBOL_REFs. */
4594 if (GET_CODE (tem) == invcode
4595 && CONSTANT_P (x)
4596 && rtx_equal_p (c, XEXP (tem, 1)))
4597 return NULL_RTX;
4599 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4602 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4603 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4604 if (code == NE
4605 && op1 == const0_rtx
4606 && GET_MODE_CLASS (mode) == MODE_INT
4607 && cmp_mode != VOIDmode
4608 /* ??? Work-around BImode bugs in the ia64 backend. */
4609 && mode != BImode
4610 && cmp_mode != BImode
4611 && nonzero_bits (op0, cmp_mode) == 1
4612 && STORE_FLAG_VALUE == 1)
4613 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4614 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4615 : lowpart_subreg (mode, op0, cmp_mode);
4617 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4618 if ((code == EQ || code == NE)
4619 && op1 == const0_rtx
4620 && op0code == XOR)
4621 return simplify_gen_relational (code, mode, cmp_mode,
4622 XEXP (op0, 0), XEXP (op0, 1));
4624 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4625 if ((code == EQ || code == NE)
4626 && op0code == XOR
4627 && rtx_equal_p (XEXP (op0, 0), op1)
4628 && !side_effects_p (XEXP (op0, 0)))
4629 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4630 CONST0_RTX (mode));
4632 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4633 if ((code == EQ || code == NE)
4634 && op0code == XOR
4635 && rtx_equal_p (XEXP (op0, 1), op1)
4636 && !side_effects_p (XEXP (op0, 1)))
4637 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4638 CONST0_RTX (mode));
4640 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4641 if ((code == EQ || code == NE)
4642 && op0code == XOR
4643 && CONST_SCALAR_INT_P (op1)
4644 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4645 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4646 simplify_gen_binary (XOR, cmp_mode,
4647 XEXP (op0, 1), op1));
4649 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4650 can be implemented with a BICS instruction on some targets, or
4651 constant-folded if y is a constant. */
4652 if ((code == EQ || code == NE)
4653 && op0code == AND
4654 && rtx_equal_p (XEXP (op0, 0), op1)
4655 && !side_effects_p (op1)
4656 && op1 != CONST0_RTX (cmp_mode))
4658 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4659 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4661 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4662 CONST0_RTX (cmp_mode));
4665 /* Likewise for (eq/ne (and x y) y). */
4666 if ((code == EQ || code == NE)
4667 && op0code == AND
4668 && rtx_equal_p (XEXP (op0, 1), op1)
4669 && !side_effects_p (op1)
4670 && op1 != CONST0_RTX (cmp_mode))
4672 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4673 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4675 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4676 CONST0_RTX (cmp_mode));
4679 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4680 if ((code == EQ || code == NE)
4681 && GET_CODE (op0) == BSWAP
4682 && CONST_SCALAR_INT_P (op1))
4683 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4684 simplify_gen_unary (BSWAP, cmp_mode,
4685 op1, cmp_mode));
4687 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4688 if ((code == EQ || code == NE)
4689 && GET_CODE (op0) == BSWAP
4690 && GET_CODE (op1) == BSWAP)
4691 return simplify_gen_relational (code, mode, cmp_mode,
4692 XEXP (op0, 0), XEXP (op1, 0));
4694 if (op0code == POPCOUNT && op1 == const0_rtx)
4695 switch (code)
4697 case EQ:
4698 case LE:
4699 case LEU:
4700 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4701 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4702 XEXP (op0, 0), const0_rtx);
4704 case NE:
4705 case GT:
4706 case GTU:
4707 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4708 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4709 XEXP (op0, 0), const0_rtx);
4711 default:
4712 break;
4715 return NULL_RTX;
4718 enum
4720 CMP_EQ = 1,
4721 CMP_LT = 2,
4722 CMP_GT = 4,
4723 CMP_LTU = 8,
4724 CMP_GTU = 16
4728 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4729 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4730 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4731 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4732 For floating-point comparisons, assume that the operands were ordered. */
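/* For instance (an illustrative call, not from the sources):
   comparison_result (LE, CMP_LT | CMP_LTU) yields const_true_rtx,
   since a value known to be strictly less than another is certainly
   less than or equal to it.  */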
4734 static rtx
4735 comparison_result (enum rtx_code code, int known_results)
4737 switch (code)
4739 case EQ:
4740 case UNEQ:
4741 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4742 case NE:
4743 case LTGT:
4744 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4746 case LT:
4747 case UNLT:
4748 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4749 case GE:
4750 case UNGE:
4751 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4753 case GT:
4754 case UNGT:
4755 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4756 case LE:
4757 case UNLE:
4758 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4760 case LTU:
4761 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4762 case GEU:
4763 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4765 case GTU:
4766 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4767 case LEU:
4768 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4770 case ORDERED:
4771 return const_true_rtx;
4772 case UNORDERED:
4773 return const0_rtx;
4774 default:
4775 gcc_unreachable ();
4779 /* Check if the given comparison (done in the given MODE) is actually
4780 a tautology or a contradiction. If the mode is VOIDmode, the
4781 comparison is done in "infinite precision". If no simplification
4782 is possible, this function returns zero. Otherwise, it returns
4783 either const_true_rtx or const0_rtx. */
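/* For example (a hedged illustration): with two integer constants,
   simplify_const_relational_operation (LTU, SImode, constm1_rtx,
   const1_rtx) folds to const0_rtx, because viewed as an unsigned
   SImode value -1 is the maximum and therefore not below 1.  */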
4786 simplify_const_relational_operation (enum rtx_code code,
4787 machine_mode mode,
4788 rtx op0, rtx op1)
4790 rtx tem;
4791 rtx trueop0;
4792 rtx trueop1;
4794 gcc_assert (mode != VOIDmode
4795 || (GET_MODE (op0) == VOIDmode
4796 && GET_MODE (op1) == VOIDmode));
4798 /* If op0 is a compare, extract the comparison arguments from it. */
4799 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4801 op1 = XEXP (op0, 1);
4802 op0 = XEXP (op0, 0);
4804 if (GET_MODE (op0) != VOIDmode)
4805 mode = GET_MODE (op0);
4806 else if (GET_MODE (op1) != VOIDmode)
4807 mode = GET_MODE (op1);
4808 else
4809 return 0;
4812 /* We can't simplify MODE_CC values since we don't know what the
4813 actual comparison is. */
4814 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4815 return 0;
4817 /* Make sure the constant is second. */
4818 if (swap_commutative_operands_p (op0, op1))
4820 std::swap (op0, op1);
4821 code = swap_condition (code);
4824 trueop0 = avoid_constant_pool_reference (op0);
4825 trueop1 = avoid_constant_pool_reference (op1);
4827 /* For integer comparisons of A and B maybe we can simplify A - B and can
4828 then simplify a comparison of that with zero. If A and B are both either
4829 a register or a CONST_INT, this can't help; testing for these cases will
4830 prevent infinite recursion here and speed things up.
4832 We can only do this for EQ and NE comparisons as otherwise we may
4833 lose or introduce overflow, which we cannot disregard as undefined, since
4834 we do not know the signedness of the operation on either the left or
4835 the right hand side of the comparison. */
4837 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4838 && (code == EQ || code == NE)
4839 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4840 && (REG_P (op1) || CONST_INT_P (trueop1)))
4841 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4842 /* We cannot do this if tem is a nonzero address. */
4843 && ! nonzero_address_p (tem))
4844 return simplify_const_relational_operation (signed_condition (code),
4845 mode, tem, const0_rtx);
4847 if (! HONOR_NANS (mode) && code == ORDERED)
4848 return const_true_rtx;
4850 if (! HONOR_NANS (mode) && code == UNORDERED)
4851 return const0_rtx;
4853 /* For modes without NaNs, if the two operands are equal, we know the
4854 result except if they have side-effects. Even with NaNs we know
4855 the result of unordered comparisons and, if signaling NaNs are
4856 irrelevant, also the result of LT/GT/LTGT. */
4857 if ((! HONOR_NANS (trueop0)
4858 || code == UNEQ || code == UNLE || code == UNGE
4859 || ((code == LT || code == GT || code == LTGT)
4860 && ! HONOR_SNANS (trueop0)))
4861 && rtx_equal_p (trueop0, trueop1)
4862 && ! side_effects_p (trueop0))
4863 return comparison_result (code, CMP_EQ);
4865 /* If the operands are floating-point constants, see if we can fold
4866 the result. */
4867 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4868 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4869 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4871 REAL_VALUE_TYPE d0, d1;
4873 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4874 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4876 /* Comparisons are unordered iff at least one of the values is NaN. */
4877 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4878 switch (code)
4880 case UNEQ:
4881 case UNLT:
4882 case UNGT:
4883 case UNLE:
4884 case UNGE:
4885 case NE:
4886 case UNORDERED:
4887 return const_true_rtx;
4888 case EQ:
4889 case LT:
4890 case GT:
4891 case LE:
4892 case GE:
4893 case LTGT:
4894 case ORDERED:
4895 return const0_rtx;
4896 default:
4897 return 0;
4900 return comparison_result (code,
4901 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4902 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4905 /* Otherwise, see if the operands are both integers. */
4906 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4907 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4909 /* It would be nice if we really had a mode here. However, the
4910 largest int representable on the target is as good as
4911 infinite. */
4912 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4913 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4914 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4916 if (wi::eq_p (ptrueop0, ptrueop1))
4917 return comparison_result (code, CMP_EQ);
4918 else
4920 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4921 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4922 return comparison_result (code, cr);
4926 /* Optimize comparisons with upper and lower bounds. */
4927 if (HWI_COMPUTABLE_MODE_P (mode)
4928 && CONST_INT_P (trueop1)
4929 && !side_effects_p (trueop0))
4931 int sign;
4932 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4933 HOST_WIDE_INT val = INTVAL (trueop1);
4934 HOST_WIDE_INT mmin, mmax;
4936 if (code == GEU
4937 || code == LEU
4938 || code == GTU
4939 || code == LTU)
4940 sign = 0;
4941 else
4942 sign = 1;
4944 /* Get a reduced range if the sign bit is zero. */
4945 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4947 mmin = 0;
4948 mmax = nonzero;
4950 else
4952 rtx mmin_rtx, mmax_rtx;
4953 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4955 mmin = INTVAL (mmin_rtx);
4956 mmax = INTVAL (mmax_rtx);
4957 if (sign)
4959 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4961 mmin >>= (sign_copies - 1);
4962 mmax >>= (sign_copies - 1);
4966 switch (code)
4968 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4969 case GEU:
4970 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4971 return const_true_rtx;
4972 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4973 return const0_rtx;
4974 break;
4975 case GE:
4976 if (val <= mmin)
4977 return const_true_rtx;
4978 if (val > mmax)
4979 return const0_rtx;
4980 break;
4982 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4983 case LEU:
4984 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4985 return const_true_rtx;
4986 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4987 return const0_rtx;
4988 break;
4989 case LE:
4990 if (val >= mmax)
4991 return const_true_rtx;
4992 if (val < mmin)
4993 return const0_rtx;
4994 break;
4996 case EQ:
4997 /* x == y is always false for y out of range. */
4998 if (val < mmin || val > mmax)
4999 return const0_rtx;
5000 break;
5002 /* x > y is always false for y >= mmax, always true for y < mmin. */
5003 case GTU:
5004 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5005 return const0_rtx;
5006 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5007 return const_true_rtx;
5008 break;
5009 case GT:
5010 if (val >= mmax)
5011 return const0_rtx;
5012 if (val < mmin)
5013 return const_true_rtx;
5014 break;
5016 /* x < y is always false for y <= mmin, always true for y > mmax. */
5017 case LTU:
5018 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5019 return const0_rtx;
5020 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5021 return const_true_rtx;
5022 break;
5023 case LT:
5024 if (val <= mmin)
5025 return const0_rtx;
5026 if (val > mmax)
5027 return const_true_rtx;
5028 break;
5030 case NE:
5031 /* x != y is always true for y out of range. */
5032 if (val < mmin || val > mmax)
5033 return const_true_rtx;
5034 break;
5036 default:
5037 break;
5041 /* Optimize integer comparisons with zero. */
5042 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5044 /* Some addresses are known to be nonzero. We don't know
5045 their sign, but equality comparisons are known. */
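/* E.g. (illustrative, assuming the symbol is not weak):
   (ne (symbol_ref "x") (const_int 0)) folds to const_true_rtx here,
   because the address of "x" is known to be nonzero even though its
   magnitude is not.  */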
5046 if (nonzero_address_p (trueop0))
5048 if (code == EQ || code == LEU)
5049 return const0_rtx;
5050 if (code == NE || code == GTU)
5051 return const_true_rtx;
5054 /* See if the first operand is an IOR with a constant. If so, we
5055 may be able to determine the result of this comparison. */
5056 if (GET_CODE (op0) == IOR)
5058 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5059 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5061 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5062 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5063 && (UINTVAL (inner_const)
5064 & ((unsigned HOST_WIDE_INT) 1
5065 << sign_bitnum)));
5067 switch (code)
5069 case EQ:
5070 case LEU:
5071 return const0_rtx;
5072 case NE:
5073 case GTU:
5074 return const_true_rtx;
5075 case LT:
5076 case LE:
5077 if (has_sign)
5078 return const_true_rtx;
5079 break;
5080 case GT:
5081 case GE:
5082 if (has_sign)
5083 return const0_rtx;
5084 break;
5085 default:
5086 break;
5092 /* Optimize comparison of ABS with zero. */
5093 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5094 && (GET_CODE (trueop0) == ABS
5095 || (GET_CODE (trueop0) == FLOAT_EXTEND
5096 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5098 switch (code)
5100 case LT:
5101 /* Optimize abs(x) < 0.0. */
5102 if (!HONOR_SNANS (mode)
5103 && (!INTEGRAL_MODE_P (mode)
5104 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5106 if (INTEGRAL_MODE_P (mode)
5107 && (issue_strict_overflow_warning
5108 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5109 warning (OPT_Wstrict_overflow,
5110 ("assuming signed overflow does not occur when "
5111 "assuming abs (x) < 0 is false"));
5112 return const0_rtx;
5114 break;
5116 case GE:
5117 /* Optimize abs(x) >= 0.0. */
5118 if (!HONOR_NANS (mode)
5119 && (!INTEGRAL_MODE_P (mode)
5120 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5122 if (INTEGRAL_MODE_P (mode)
5123 && (issue_strict_overflow_warning
5124 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5125 warning (OPT_Wstrict_overflow,
5126 ("assuming signed overflow does not occur when "
5127 "assuming abs (x) >= 0 is true"));
5128 return const_true_rtx;
5130 break;
5132 case UNGE:
5133 /* Optimize ! (abs(x) < 0.0). */
5134 return const_true_rtx;
5136 default:
5137 break;
5141 return 0;
5144 /* Simplify CODE, an operation with result mode MODE and three operands,
5145 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5146 a constant. Return 0 if no simplification is possible. */
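/* As a simple illustration (not from the sources): with a constant
   condition the IF_THEN_ELSE case below gives
   (if_then_else (const_int 1) A B) -> A and
   (if_then_else (const_int 0) A B) -> B.  */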
5149 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5150 machine_mode op0_mode, rtx op0, rtx op1,
5151 rtx op2)
5153 unsigned int width = GET_MODE_PRECISION (mode);
5154 bool any_change = false;
5155 rtx tem, trueop2;
5157 /* VOIDmode means "infinite" precision. */
5158 if (width == 0)
5159 width = HOST_BITS_PER_WIDE_INT;
5161 switch (code)
5163 case FMA:
5164 /* Simplify negations around the multiplication. */
5165 /* -a * -b + c => a * b + c. */
5166 if (GET_CODE (op0) == NEG)
5168 tem = simplify_unary_operation (NEG, mode, op1, mode);
5169 if (tem)
5170 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5172 else if (GET_CODE (op1) == NEG)
5174 tem = simplify_unary_operation (NEG, mode, op0, mode);
5175 if (tem)
5176 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5179 /* Canonicalize the two multiplication operands. */
5180 /* a * -b + c => -b * a + c. */
5181 if (swap_commutative_operands_p (op0, op1))
5182 std::swap (op0, op1), any_change = true;
5184 if (any_change)
5185 return gen_rtx_FMA (mode, op0, op1, op2);
5186 return NULL_RTX;
5188 case SIGN_EXTRACT:
5189 case ZERO_EXTRACT:
5190 if (CONST_INT_P (op0)
5191 && CONST_INT_P (op1)
5192 && CONST_INT_P (op2)
5193 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5194 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5196 /* Extracting a bit-field from a constant. */
5197 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5198 HOST_WIDE_INT op1val = INTVAL (op1);
5199 HOST_WIDE_INT op2val = INTVAL (op2);
5200 if (BITS_BIG_ENDIAN)
5201 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5202 else
5203 val >>= op2val;
5205 if (HOST_BITS_PER_WIDE_INT != op1val)
5207 /* First zero-extend. */
5208 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5209 /* If desired, propagate sign bit. */
5210 if (code == SIGN_EXTRACT
5211 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5212 != 0)
5213 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5216 return gen_int_mode (val, mode);
5218 break;
5220 case IF_THEN_ELSE:
5221 if (CONST_INT_P (op0))
5222 return op0 != const0_rtx ? op1 : op2;
5224 /* Convert c ? a : a into "a". */
5225 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5226 return op1;
5228 /* Convert a != b ? a : b into "a". */
5229 if (GET_CODE (op0) == NE
5230 && ! side_effects_p (op0)
5231 && ! HONOR_NANS (mode)
5232 && ! HONOR_SIGNED_ZEROS (mode)
5233 && ((rtx_equal_p (XEXP (op0, 0), op1)
5234 && rtx_equal_p (XEXP (op0, 1), op2))
5235 || (rtx_equal_p (XEXP (op0, 0), op2)
5236 && rtx_equal_p (XEXP (op0, 1), op1))))
5237 return op1;
5239 /* Convert a == b ? a : b into "b". */
5240 if (GET_CODE (op0) == EQ
5241 && ! side_effects_p (op0)
5242 && ! HONOR_NANS (mode)
5243 && ! HONOR_SIGNED_ZEROS (mode)
5244 && ((rtx_equal_p (XEXP (op0, 0), op1)
5245 && rtx_equal_p (XEXP (op0, 1), op2))
5246 || (rtx_equal_p (XEXP (op0, 0), op2)
5247 && rtx_equal_p (XEXP (op0, 1), op1))))
5248 return op2;
5250 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5252 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5253 ? GET_MODE (XEXP (op0, 1))
5254 : GET_MODE (XEXP (op0, 0)));
5255 rtx temp;
5257 /* Look for happy constants in op1 and op2. */
5258 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5260 HOST_WIDE_INT t = INTVAL (op1);
5261 HOST_WIDE_INT f = INTVAL (op2);
5263 if (t == STORE_FLAG_VALUE && f == 0)
5264 code = GET_CODE (op0);
5265 else if (t == 0 && f == STORE_FLAG_VALUE)
5267 enum rtx_code tmp;
5268 tmp = reversed_comparison_code (op0, NULL_RTX);
5269 if (tmp == UNKNOWN)
5270 break;
5271 code = tmp;
5273 else
5274 break;
5276 return simplify_gen_relational (code, mode, cmp_mode,
5277 XEXP (op0, 0), XEXP (op0, 1));
5280 if (cmp_mode == VOIDmode)
5281 cmp_mode = op0_mode;
5282 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5283 cmp_mode, XEXP (op0, 0),
5284 XEXP (op0, 1));
5286 /* See if any simplifications were possible. */
5287 if (temp)
5289 if (CONST_INT_P (temp))
5290 return temp == const0_rtx ? op2 : op1;
5291 else if (temp)
5292 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5295 break;
5297 case VEC_MERGE:
5298 gcc_assert (GET_MODE (op0) == mode);
5299 gcc_assert (GET_MODE (op1) == mode);
5300 gcc_assert (VECTOR_MODE_P (mode));
5301 trueop2 = avoid_constant_pool_reference (op2);
5302 if (CONST_INT_P (trueop2))
5304 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5305 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5306 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5307 unsigned HOST_WIDE_INT mask;
5308 if (n_elts == HOST_BITS_PER_WIDE_INT)
5309 mask = -1;
5310 else
5311 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5313 if (!(sel & mask) && !side_effects_p (op0))
5314 return op1;
5315 if ((sel & mask) == mask && !side_effects_p (op1))
5316 return op0;
5318 rtx trueop0 = avoid_constant_pool_reference (op0);
5319 rtx trueop1 = avoid_constant_pool_reference (op1);
5320 if (GET_CODE (trueop0) == CONST_VECTOR
5321 && GET_CODE (trueop1) == CONST_VECTOR)
5323 rtvec v = rtvec_alloc (n_elts);
5324 unsigned int i;
5326 for (i = 0; i < n_elts; i++)
5327 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5328 ? CONST_VECTOR_ELT (trueop0, i)
5329 : CONST_VECTOR_ELT (trueop1, i));
5330 return gen_rtx_CONST_VECTOR (mode, v);
5333 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5334 if no element from a appears in the result. */
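/* Illustrative case (4-element vectors assumed): in
     (vec_merge (vec_merge A B (const_int 12)) C (const_int 3))
   the outer mask only keeps elements 0 and 1 of the inner merge, while
   the inner mask takes elements 2 and 3 from A, so no element of A can
   reach the result and the expression becomes
   (vec_merge B C (const_int 3)).  */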
5335 if (GET_CODE (op0) == VEC_MERGE)
5337 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5338 if (CONST_INT_P (tem))
5340 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5341 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5342 return simplify_gen_ternary (code, mode, mode,
5343 XEXP (op0, 1), op1, op2);
5344 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5345 return simplify_gen_ternary (code, mode, mode,
5346 XEXP (op0, 0), op1, op2);
5349 if (GET_CODE (op1) == VEC_MERGE)
5351 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5352 if (CONST_INT_P (tem))
5354 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5355 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5356 return simplify_gen_ternary (code, mode, mode,
5357 op0, XEXP (op1, 1), op2);
5358 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5359 return simplify_gen_ternary (code, mode, mode,
5360 op0, XEXP (op1, 0), op2);
5364 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5365 with a. */
5366 if (GET_CODE (op0) == VEC_DUPLICATE
5367 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5368 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5369 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5371 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5372 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5374 if (XEXP (XEXP (op0, 0), 0) == op1
5375 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5376 return op1;
5381 if (rtx_equal_p (op0, op1)
5382 && !side_effects_p (op2) && !side_effects_p (op1))
5383 return op0;
5385 break;
5387 default:
5388 gcc_unreachable ();
5391 return 0;
5394 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5395 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5396 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5398 Works by unpacking OP into a collection of 8-bit values
5399 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5400 and then repacking them again for OUTERMODE. */
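/* Illustrative example (assuming a little-endian target): extracting
   the HImode subreg at byte 0 of (const_int 0x12345678) in SImode
   unpacks the constant into the byte array 78 56 34 12, selects the
   two bytes starting at offset 0, and repacks them as
   (const_int 0x5678).  */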
5402 static rtx
5403 simplify_immed_subreg (machine_mode outermode, rtx op,
5404 machine_mode innermode, unsigned int byte)
5406 enum {
5407 value_bit = 8,
5408 value_mask = (1 << value_bit) - 1
5410 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5411 int value_start;
5412 int i;
5413 int elem;
5415 int num_elem;
5416 rtx * elems;
5417 int elem_bitsize;
5418 rtx result_s;
5419 rtvec result_v = NULL;
5420 enum mode_class outer_class;
5421 machine_mode outer_submode;
5422 int max_bitsize;
5424 /* Some ports misuse CCmode. */
5425 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5426 return op;
5428 /* We have no way to represent a complex constant at the rtl level. */
5429 if (COMPLEX_MODE_P (outermode))
5430 return NULL_RTX;
5432 /* We support any size mode. */
5433 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5434 GET_MODE_BITSIZE (innermode));
5436 /* Unpack the value. */
5438 if (GET_CODE (op) == CONST_VECTOR)
5440 num_elem = CONST_VECTOR_NUNITS (op);
5441 elems = &CONST_VECTOR_ELT (op, 0);
5442 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5444 else
5446 num_elem = 1;
5447 elems = &op;
5448 elem_bitsize = max_bitsize;
5450 /* If this asserts, it is too complicated; reducing value_bit may help. */
5451 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5452 /* I don't know how to handle endianness of sub-units. */
5453 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5455 for (elem = 0; elem < num_elem; elem++)
5457 unsigned char * vp;
5458 rtx el = elems[elem];
5460 /* Vectors are kept in target memory order. (This is probably
5461 a mistake.) */
5463 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5464 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5465 / BITS_PER_UNIT);
5466 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5467 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5468 unsigned bytele = (subword_byte % UNITS_PER_WORD
5469 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5470 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5473 switch (GET_CODE (el))
5475 case CONST_INT:
5476 for (i = 0;
5477 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5478 i += value_bit)
5479 *vp++ = INTVAL (el) >> i;
5480 /* CONST_INTs are always logically sign-extended. */
5481 for (; i < elem_bitsize; i += value_bit)
5482 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5483 break;
5485 case CONST_WIDE_INT:
5487 rtx_mode_t val = std::make_pair (el, innermode);
5488 unsigned char extend = wi::sign_mask (val);
5490 for (i = 0; i < elem_bitsize; i += value_bit)
5491 *vp++ = wi::extract_uhwi (val, i, value_bit);
5492 for (; i < elem_bitsize; i += value_bit)
5493 *vp++ = extend;
5495 break;
5497 case CONST_DOUBLE:
5498 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5500 unsigned char extend = 0;
5501 /* If this triggers, someone should have generated a
5502 CONST_INT instead. */
5503 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5505 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5506 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5507 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5509 *vp++
5510 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5511 i += value_bit;
5514 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5515 extend = -1;
5516 for (; i < elem_bitsize; i += value_bit)
5517 *vp++ = extend;
5519 else
5521 /* This is big enough for anything on the platform. */
5522 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5523 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5525 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5526 gcc_assert (bitsize <= elem_bitsize);
5527 gcc_assert (bitsize % value_bit == 0);
5529 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5530 GET_MODE (el));
5532 /* real_to_target produces its result in words affected by
5533 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5534 and use WORDS_BIG_ENDIAN instead; see the documentation
5535 of SUBREG in rtl.texi. */
5536 for (i = 0; i < bitsize; i += value_bit)
5538 int ibase;
5539 if (WORDS_BIG_ENDIAN)
5540 ibase = bitsize - 1 - i;
5541 else
5542 ibase = i;
5543 *vp++ = tmp[ibase / 32] >> i % 32;
5546 /* It shouldn't matter what's done here, so fill it with
5547 zero. */
5548 for (; i < elem_bitsize; i += value_bit)
5549 *vp++ = 0;
5551 break;
5553 case CONST_FIXED:
5554 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5556 for (i = 0; i < elem_bitsize; i += value_bit)
5557 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5559 else
5561 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5562 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5563 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5564 i += value_bit)
5565 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5566 >> (i - HOST_BITS_PER_WIDE_INT);
5567 for (; i < elem_bitsize; i += value_bit)
5568 *vp++ = 0;
5570 break;
5572 default:
5573 gcc_unreachable ();
5577 /* Now, pick the right byte to start with. */
5578 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5579 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5580 will already have offset 0. */
5581 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5583 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5584 - byte);
5585 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5586 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5587 byte = (subword_byte % UNITS_PER_WORD
5588 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5591 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5592 so if it's become negative it will instead be very large.) */
5593 gcc_assert (byte < GET_MODE_SIZE (innermode));
5595 /* Convert from bytes to chunks of size value_bit. */
5596 value_start = byte * (BITS_PER_UNIT / value_bit);
5598 /* Re-pack the value. */
5600 if (VECTOR_MODE_P (outermode))
5602 num_elem = GET_MODE_NUNITS (outermode);
5603 result_v = rtvec_alloc (num_elem);
5604 elems = &RTVEC_ELT (result_v, 0);
5605 outer_submode = GET_MODE_INNER (outermode);
5607 else
5609 num_elem = 1;
5610 elems = &result_s;
5611 outer_submode = outermode;
5614 outer_class = GET_MODE_CLASS (outer_submode);
5615 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5617 gcc_assert (elem_bitsize % value_bit == 0);
5618 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5620 for (elem = 0; elem < num_elem; elem++)
5622 unsigned char *vp;
5624 /* Vectors are stored in target memory order. (This is probably
5625 a mistake.) */
5627 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5628 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5629 / BITS_PER_UNIT);
5630 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5631 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5632 unsigned bytele = (subword_byte % UNITS_PER_WORD
5633 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5634 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5637 switch (outer_class)
5639 case MODE_INT:
5640 case MODE_PARTIAL_INT:
5642 int u;
5643 int base = 0;
5644 int units
5645 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5646 / HOST_BITS_PER_WIDE_INT;
5647 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5648 wide_int r;
5650 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5651 return NULL_RTX;
5652 for (u = 0; u < units; u++)
5654 unsigned HOST_WIDE_INT buf = 0;
5655 for (i = 0;
5656 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5657 i += value_bit)
5658 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5660 tmp[u] = buf;
5661 base += HOST_BITS_PER_WIDE_INT;
5663 r = wide_int::from_array (tmp, units,
5664 GET_MODE_PRECISION (outer_submode));
5665 #if TARGET_SUPPORTS_WIDE_INT == 0
5666 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5667 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5668 return NULL_RTX;
5669 #endif
5670 elems[elem] = immed_wide_int_const (r, outer_submode);
5672 break;
5674 case MODE_FLOAT:
5675 case MODE_DECIMAL_FLOAT:
5677 REAL_VALUE_TYPE r;
5678 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5680 /* real_from_target wants its input in words affected by
5681 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5682 and use WORDS_BIG_ENDIAN instead; see the documentation
5683 of SUBREG in rtl.texi. */
5684 for (i = 0; i < max_bitsize / 32; i++)
5685 tmp[i] = 0;
5686 for (i = 0; i < elem_bitsize; i += value_bit)
5688 int ibase;
5689 if (WORDS_BIG_ENDIAN)
5690 ibase = elem_bitsize - 1 - i;
5691 else
5692 ibase = i;
5693 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5696 real_from_target (&r, tmp, outer_submode);
5697 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5699 break;
5701 case MODE_FRACT:
5702 case MODE_UFRACT:
5703 case MODE_ACCUM:
5704 case MODE_UACCUM:
5706 FIXED_VALUE_TYPE f;
5707 f.data.low = 0;
5708 f.data.high = 0;
5709 f.mode = outer_submode;
5711 for (i = 0;
5712 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5713 i += value_bit)
5714 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5715 for (; i < elem_bitsize; i += value_bit)
5716 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5717 << (i - HOST_BITS_PER_WIDE_INT));
5719 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5721 break;
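The fixed-point case splits the same chunk stream across a low/high pair once the element is wider than one HOST_WIDE_INT. A hedged sketch under the same assumptions as above (value_bit == 8, 64-bit words, nbits at most 128; names are illustrative):

/* Illustration only: fill a low/high pair from little-endian 8-bit chunks.  */
struct lowhigh { unsigned long long low, high; };

static struct lowhigh
pack_lowhigh (const unsigned char *vp, unsigned int nbits)
{
  struct lowhigh f = { 0, 0 };
  unsigned int i;
  for (i = 0; i < 64 && i < nbits; i += 8)
    f.low |= (unsigned long long) (*vp++ & 0xff) << i;
  for (; i < nbits; i += 8)
    f.high |= (unsigned long long) (*vp++ & 0xff) << (i - 64);
  return f;
}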
5723 default:
5724 gcc_unreachable ();
5727 if (VECTOR_MODE_P (outermode))
5728 return gen_rtx_CONST_VECTOR (outermode, result_v);
5729 else
5730 return result_s;
5733 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5734 Return 0 if no simplifications are possible. */
5736 simplify_subreg (machine_mode outermode, rtx op,
5737 machine_mode innermode, unsigned int byte)
5739 /* Little bit of sanity checking. */
5740 gcc_assert (innermode != VOIDmode);
5741 gcc_assert (outermode != VOIDmode);
5742 gcc_assert (innermode != BLKmode);
5743 gcc_assert (outermode != BLKmode);
5745 gcc_assert (GET_MODE (op) == innermode
5746 || GET_MODE (op) == VOIDmode);
5748 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5749 return NULL_RTX;
5751 if (byte >= GET_MODE_SIZE (innermode))
5752 return NULL_RTX;
5754 if (outermode == innermode && !byte)
5755 return op;
5757 if (CONST_SCALAR_INT_P (op)
5758 || CONST_DOUBLE_AS_FLOAT_P (op)
5759 || GET_CODE (op) == CONST_FIXED
5760 || GET_CODE (op) == CONST_VECTOR)
5761 return simplify_immed_subreg (outermode, op, innermode, byte);
5763 /* Changing mode twice with SUBREG => just change it once,
5764 or not at all if changing back to OP's starting mode. */
5765 if (GET_CODE (op) == SUBREG)
5767 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5768 int final_offset = byte + SUBREG_BYTE (op);
5769 rtx newx;
5771 if (outermode == innermostmode
5772 && byte == 0 && SUBREG_BYTE (op) == 0)
5773 return SUBREG_REG (op);
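/* E.g. (subreg:DI (subreg:SI (reg:DI 100) 0) 0) collapses back to
   (reg:DI 100) via the check above; register number 100 is illustrative. */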
5775 /* The SUBREG_BYTE represents the offset, as if the value were stored
5776 in memory. An irritating exception is the paradoxical subreg, where
5777 we define SUBREG_BYTE to be 0; on big endian machines this value
5778 should really be negative. For a moment, undo this exception. */
5779 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5781 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5782 if (WORDS_BIG_ENDIAN)
5783 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5784 if (BYTES_BIG_ENDIAN)
5785 final_offset += difference % UNITS_PER_WORD;
5787 if (SUBREG_BYTE (op) == 0
5788 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5790 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5791 if (WORDS_BIG_ENDIAN)
5792 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5793 if (BYTES_BIG_ENDIAN)
5794 final_offset += difference % UNITS_PER_WORD;
5797 /* See whether resulting subreg will be paradoxical. */
5798 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5800 /* In nonparadoxical subregs we can't handle negative offsets. */
5801 if (final_offset < 0)
5802 return NULL_RTX;
5803 /* Bail out in case the resulting subreg would be incorrect. */
5804 if (final_offset % GET_MODE_SIZE (outermode)
5805 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5806 return NULL_RTX;
5808 else
5810 int offset = 0;
5811 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5813 /* In a paradoxical subreg, see if we are still looking at the lower
5814 part. If so, our SUBREG_BYTE will be 0. */
5815 if (WORDS_BIG_ENDIAN)
5816 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5817 if (BYTES_BIG_ENDIAN)
5818 offset += difference % UNITS_PER_WORD;
5819 if (offset == final_offset)
5820 final_offset = 0;
5821 else
5822 return NULL_RTX;
5825 /* Recurse for further possible simplifications. */
5826 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5827 final_offset);
5828 if (newx)
5829 return newx;
5830 if (validate_subreg (outermode, innermostmode,
5831 SUBREG_REG (op), final_offset))
5833 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5834 if (SUBREG_PROMOTED_VAR_P (op)
5835 && SUBREG_PROMOTED_SIGN (op) >= 0
5836 && GET_MODE_CLASS (outermode) == MODE_INT
5837 && IN_RANGE (GET_MODE_SIZE (outermode),
5838 GET_MODE_SIZE (innermode),
5839 GET_MODE_SIZE (innermostmode))
5840 && subreg_lowpart_p (newx))
5842 SUBREG_PROMOTED_VAR_P (newx) = 1;
5843 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5845 return newx;
5847 return NULL_RTX;
5850 /* SUBREG of a hard register => just change the register number
5851 and/or mode. If the hard register is not valid in that mode,
5852 suppress this simplification. If the hard register is the stack,
5853 frame, or argument pointer, leave this as a SUBREG. */
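/* For instance, assuming hard register 1 can hold SImode,
   (subreg:SI (reg:DI 1) 0) may become (reg:SI 1); simplify_subreg_regno
   derives the final register number from the byte offset. */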
5855 if (REG_P (op) && HARD_REGISTER_P (op))
5857 unsigned int regno, final_regno;
5859 regno = REGNO (op);
5860 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5861 if (HARD_REGISTER_NUM_P (final_regno))
5863 rtx x;
5864 int final_offset = byte;
5866 /* Adjust offset for paradoxical subregs. */
5867 if (byte == 0
5868 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5870 int difference = (GET_MODE_SIZE (innermode)
5871 - GET_MODE_SIZE (outermode));
5872 if (WORDS_BIG_ENDIAN)
5873 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5874 if (BYTES_BIG_ENDIAN)
5875 final_offset += difference % UNITS_PER_WORD;
5878 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5880 /* Propagate the original regno. We don't have any way to specify
5881 the offset inside the original regno, so do so only for the lowpart.
5882 The information is used only by alias analysis, which cannot
5883 grok a partial register anyway. */
5885 if (subreg_lowpart_offset (outermode, innermode) == byte)
5886 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5887 return x;
5891 /* If we have a SUBREG of a register that we are replacing and we are
5892 replacing it with a MEM, make a new MEM and try replacing the
5893 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5894 or if we would be widening it. */
5896 if (MEM_P (op)
5897 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5898 /* Allow splitting of volatile memory references in case we don't
5899 have an instruction to move the whole thing. */
5900 && (! MEM_VOLATILE_P (op)
5901 || ! have_insn_for (SET, innermode))
5902 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5903 return adjust_address_nv (op, outermode, byte);
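/* E.g. (subreg:SI (mem:DI X) 4) is rewritten by adjust_address_nv as a
   narrower (mem:SI ...) whose address is X plus 4 bytes. */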
5905 /* Handle complex values represented as CONCAT
5906 of real and imaginary part. */
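/* E.g. for (concat:DC (reg:DF 100) (reg:DF 101)) and an 8-byte DFmode,
   byte 0 selects the real part (reg:DF 100) and byte 8 the imaginary
   part (reg:DF 101); the register numbers are illustrative. */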
5907 if (GET_CODE (op) == CONCAT)
5909 unsigned int part_size, final_offset;
5910 rtx part, res;
5912 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5913 if (byte < part_size)
5915 part = XEXP (op, 0);
5916 final_offset = byte;
5918 else
5920 part = XEXP (op, 1);
5921 final_offset = byte - part_size;
5924 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5925 return NULL_RTX;
5927 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5928 if (res)
5929 return res;
5930 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5931 return gen_rtx_SUBREG (outermode, part, final_offset);
5932 return NULL_RTX;
5935 /* A SUBREG resulting from a zero extension may fold to zero if
5936 it extracts higher bits than the ZERO_EXTEND's source provides. */
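/* E.g. (subreg:SI (zero_extend:DI (reg:SI 100)) 4) on a little-endian
   target reads only bits that the extension cleared, so it folds to
   (const_int 0). */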
5937 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5939 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5940 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5941 return CONST0_RTX (outermode);
5944 if (SCALAR_INT_MODE_P (outermode)
5945 && SCALAR_INT_MODE_P (innermode)
5946 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5947 && byte == subreg_lowpart_offset (outermode, innermode))
5949 rtx tem = simplify_truncation (outermode, op, innermode);
5950 if (tem)
5951 return tem;
5954 return NULL_RTX;
5957 /* Make a SUBREG operation or equivalent if it folds. */
5960 simplify_gen_subreg (machine_mode outermode, rtx op,
5961 machine_mode innermode, unsigned int byte)
5963 rtx newx;
5965 newx = simplify_subreg (outermode, op, innermode, byte);
5966 if (newx)
5967 return newx;
5969 if (GET_CODE (op) == SUBREG
5970 || GET_CODE (op) == CONCAT
5971 || GET_MODE (op) == VOIDmode)
5972 return NULL_RTX;
5974 if (validate_subreg (outermode, innermode, op, byte))
5975 return gen_rtx_SUBREG (outermode, op, byte);
5977 return NULL_RTX;
5980 /* Simplify X, an rtx expression.
5982 Return the simplified expression or NULL if no simplifications
5983 were possible.
5985 This is the preferred entry point into the simplification routines;
5986 however, we still allow passes to call the more specific routines.
5988 Right now GCC has three (yes, three) major bodies of RTL simplification
5989 code that need to be unified.
5991 1. fold_rtx in cse.c. This code uses various CSE specific
5992 information to aid in RTL simplification.
5994 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5995 it uses combine specific information to aid in RTL
5996 simplification.
5998 3. The routines in this file.
6001 Long term we want to only have one body of simplification code; to
6002 get to that state I recommend the following steps:
6004 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6005 which do not depend on pass-specific state into these routines.
6007 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6008 use this routine whenever possible.
6010 3. Allow for pass dependent state to be provided to these
6011 routines and add simplifications based on the pass dependent
6012 state. Remove code from cse.c & combine.c that becomes
6013 redundant/dead.
6015 It will take time, but ultimately the compiler will be easier to
6016 maintain and improve. It's totally silly that when we add a
6017 simplification it needs to be added to 4 places (3 for RTL
6018 simplification and 1 for tree simplification). */
6021 simplify_rtx (const_rtx x)
6023 const enum rtx_code code = GET_CODE (x);
6024 const machine_mode mode = GET_MODE (x);
6026 switch (GET_RTX_CLASS (code))
6028 case RTX_UNARY:
6029 return simplify_unary_operation (code, mode,
6030 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6031 case RTX_COMM_ARITH:
6032 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6033 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
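/* E.g. (plus:SI (const_int 4) (reg:SI 60)) is re-issued with the
   operands swapped, giving the canonical (plus:SI (reg:SI 60)
   (const_int 4)); register 60 is illustrative. */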
6035 /* Fall through.... */
6037 case RTX_BIN_ARITH:
6038 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6040 case RTX_TERNARY:
6041 case RTX_BITFIELD_OPS:
6042 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6043 XEXP (x, 0), XEXP (x, 1),
6044 XEXP (x, 2));
6046 case RTX_COMPARE:
6047 case RTX_COMM_COMPARE:
6048 return simplify_relational_operation (code, mode,
6049 ((GET_MODE (XEXP (x, 0))
6050 != VOIDmode)
6051 ? GET_MODE (XEXP (x, 0))
6052 : GET_MODE (XEXP (x, 1))),
6053 XEXP (x, 0),
6054 XEXP (x, 1));
6056 case RTX_EXTRA:
6057 if (code == SUBREG)
6058 return simplify_subreg (mode, SUBREG_REG (x),
6059 GET_MODE (SUBREG_REG (x)),
6060 SUBREG_BYTE (x));
6061 break;
6063 case RTX_OBJ:
6064 if (code == LO_SUM)
6066 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6067 if (GET_CODE (XEXP (x, 0)) == HIGH
6068 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6069 return XEXP (x, 1);
6071 break;
6073 default:
6074 break;
6076 return NULL;