gcc/simplify-rtx.c (official-gcc.git)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "alias.h"
27 #include "symtab.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "insn-codes.h"
39 #include "optabs.h"
40 #include "expmed.h"
41 #include "dojump.h"
42 #include "explow.h"
43 #include "calls.h"
44 #include "emit-rtl.h"
45 #include "stmt.h"
46 #include "expr.h"
47 #include "diagnostic-core.h"
48 #include "target.h"
49 #include "predict.h"
51 /* Simplification and canonicalization of RTL. */
53 /* Much code operates on (low, high) pairs; the low value is an
54 unsigned wide int, the high value a signed wide int. We
55 occasionally need to sign extend from low to high as if low were a
56 signed wide int. */
57 #define HWI_SIGN_EXTEND(low) \
58 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
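/* A minimal illustrative sketch, not part of the original file (the
   function name is hypothetical): how HWI_SIGN_EXTEND produces the high
   half of a (low, high) pair.  */
static inline HOST_WIDE_INT
hwi_sign_extend_sketch (unsigned HOST_WIDE_INT low)
{
  /* Yields -1 when LOW is negative viewed as a signed HOST_WIDE_INT,
     and 0 otherwise.  */
  return HWI_SIGN_EXTEND (low);
}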
60 static rtx neg_const_int (machine_mode, const_rtx);
61 static bool plus_minus_operand_p (const_rtx);
62 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
63 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
64 unsigned int);
65 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
66 rtx, rtx);
67 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
68 machine_mode, rtx, rtx);
69 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
70 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
71 rtx, rtx, rtx, rtx);
73 /* Negate a CONST_INT rtx, truncating (because a conversion from a
74 maximally negative number can overflow). */
75 static rtx
76 neg_const_int (machine_mode mode, const_rtx i)
78 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
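/* Illustrative sketch only (not in the original file; name hypothetical):
   why neg_const_int negates in unsigned arithmetic.  Negating the most
   negative QImode value overflows, and gen_int_mode truncates the result
   back into the mode.  */
static inline rtx
neg_const_int_sketch (void)
{
  /* INTVAL is -128; -(unsigned) gives 128, which gen_int_mode truncates
     to QImode, yielding (const_int -128) again.  */
  return neg_const_int (QImode, gen_int_mode (-128, QImode));
}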
81 /* Test whether expression, X, is an immediate constant that represents
82 the most significant bit of machine mode MODE. */
84 bool
85 mode_signbit_p (machine_mode mode, const_rtx x)
87 unsigned HOST_WIDE_INT val;
88 unsigned int width;
90 if (GET_MODE_CLASS (mode) != MODE_INT)
91 return false;
93 width = GET_MODE_PRECISION (mode);
94 if (width == 0)
95 return false;
97 if (width <= HOST_BITS_PER_WIDE_INT
98 && CONST_INT_P (x))
99 val = INTVAL (x);
100 #if TARGET_SUPPORTS_WIDE_INT
101 else if (CONST_WIDE_INT_P (x))
103 unsigned int i;
104 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
105 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
106 return false;
107 for (i = 0; i < elts - 1; i++)
108 if (CONST_WIDE_INT_ELT (x, i) != 0)
109 return false;
110 val = CONST_WIDE_INT_ELT (x, elts - 1);
111 width %= HOST_BITS_PER_WIDE_INT;
112 if (width == 0)
113 width = HOST_BITS_PER_WIDE_INT;
115 #else
116 else if (width <= HOST_BITS_PER_DOUBLE_INT
117 && CONST_DOUBLE_AS_INT_P (x)
118 && CONST_DOUBLE_LOW (x) == 0)
120 val = CONST_DOUBLE_HIGH (x);
121 width -= HOST_BITS_PER_WIDE_INT;
123 #endif
124 else
125 /* X is not an integer constant. */
126 return false;
128 if (width < HOST_BITS_PER_WIDE_INT)
129 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
130 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
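/* Illustrative sketch only (not in the original file; name hypothetical):
   for SImode the single value accepted by mode_signbit_p is 0x80000000,
   built with gen_int_mode so it is canonically sign-extended.  */
static inline bool
simode_signbit_sketch (void)
{
  rtx signbit = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  return mode_signbit_p (SImode, signbit);	/* Always true.  */
}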
133 /* Test whether VAL is equal to the most significant bit of mode MODE
134 (after masking with the mode mask of MODE). Returns false if the
135 precision of MODE is too large to handle. */
137 bool
138 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
140 unsigned int width;
142 if (GET_MODE_CLASS (mode) != MODE_INT)
143 return false;
145 width = GET_MODE_PRECISION (mode);
146 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
147 return false;
149 val &= GET_MODE_MASK (mode);
150 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
153 /* Test whether the most significant bit of mode MODE is set in VAL.
154 Returns false if the precision of MODE is too large to handle. */
155 bool
156 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
158 unsigned int width;
160 if (GET_MODE_CLASS (mode) != MODE_INT)
161 return false;
163 width = GET_MODE_PRECISION (mode);
164 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
165 return false;
167 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
168 return val != 0;
171 /* Test whether the most significant bit of mode MODE is clear in VAL.
172 Returns false if the precision of MODE is too large to handle. */
173 bool
174 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
176 unsigned int width;
178 if (GET_MODE_CLASS (mode) != MODE_INT)
179 return false;
181 width = GET_MODE_PRECISION (mode);
182 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
183 return false;
185 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
186 return val == 0;
189 /* Make a binary operation by properly ordering the operands and
190 seeing if the expression folds. */
193 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
194 rtx op1)
196 rtx tem;
198 /* If this simplifies, do it. */
199 tem = simplify_binary_operation (code, mode, op0, op1);
200 if (tem)
201 return tem;
203 /* Put complex operands first and constants second if commutative. */
204 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
205 && swap_commutative_operands_p (op0, op1))
206 std::swap (op0, op1);
208 return gen_rtx_fmt_ee (code, mode, op0, op1);
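/* Illustrative usage sketch, not part of the original file (the register
   number and function name are hypothetical): constant operands fold
   immediately, and a commutative pair is reordered so the constant comes
   second.  */
static inline rtx
simplify_gen_binary_sketch (void)
{
  rtx reg = gen_rtx_REG (SImode, 0);
  /* Folds directly to (const_int 5).  */
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  /* Canonicalized to (plus:SI (reg:SI 0) (const_int 5)).  */
  return simplify_gen_binary (PLUS, SImode, sum, reg);
}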
211 /* If X is a MEM referencing the constant pool, return the real value.
212 Otherwise return X. */
214 avoid_constant_pool_reference (rtx x)
216 rtx c, tmp, addr;
217 machine_mode cmode;
218 HOST_WIDE_INT offset = 0;
220 switch (GET_CODE (x))
222 case MEM:
223 break;
225 case FLOAT_EXTEND:
226 /* Handle float extensions of constant pool references. */
227 tmp = XEXP (x, 0);
228 c = avoid_constant_pool_reference (tmp);
229 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
231 REAL_VALUE_TYPE d;
233 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
234 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
236 return x;
238 default:
239 return x;
242 if (GET_MODE (x) == BLKmode)
243 return x;
245 addr = XEXP (x, 0);
247 /* Call target hook to avoid the effects of -fpic etc.... */
248 addr = targetm.delegitimize_address (addr);
250 /* Split the address into a base and integer offset. */
251 if (GET_CODE (addr) == CONST
252 && GET_CODE (XEXP (addr, 0)) == PLUS
253 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
255 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
256 addr = XEXP (XEXP (addr, 0), 0);
259 if (GET_CODE (addr) == LO_SUM)
260 addr = XEXP (addr, 1);
262 /* If this is a constant pool reference, we can turn it into its
263 constant and hope that simplifications happen. */
264 if (GET_CODE (addr) == SYMBOL_REF
265 && CONSTANT_POOL_ADDRESS_P (addr))
267 c = get_pool_constant (addr);
268 cmode = get_pool_mode (addr);
270 /* If we're accessing the constant in a different mode than it was
271 originally stored, attempt to fix that up via subreg simplifications.
272 If that fails we have no choice but to return the original memory. */
273 if ((offset != 0 || cmode != GET_MODE (x))
274 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
276 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
277 if (tem && CONSTANT_P (tem))
278 return tem;
280 else
281 return c;
284 return x;
287 /* Simplify a MEM based on its attributes. This is the default
288 delegitimize_address target hook, and it's recommended that every
289 overrider call it. */
292 delegitimize_mem_from_attrs (rtx x)
294 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
295 use their base addresses as equivalent. */
296 if (MEM_P (x)
297 && MEM_EXPR (x)
298 && MEM_OFFSET_KNOWN_P (x))
300 tree decl = MEM_EXPR (x);
301 machine_mode mode = GET_MODE (x);
302 HOST_WIDE_INT offset = 0;
304 switch (TREE_CODE (decl))
306 default:
307 decl = NULL;
308 break;
310 case VAR_DECL:
311 break;
313 case ARRAY_REF:
314 case ARRAY_RANGE_REF:
315 case COMPONENT_REF:
316 case BIT_FIELD_REF:
317 case REALPART_EXPR:
318 case IMAGPART_EXPR:
319 case VIEW_CONVERT_EXPR:
321 HOST_WIDE_INT bitsize, bitpos;
322 tree toffset;
323 int unsignedp, volatilep = 0;
325 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
326 &mode, &unsignedp, &volatilep, false);
327 if (bitsize != GET_MODE_BITSIZE (mode)
328 || (bitpos % BITS_PER_UNIT)
329 || (toffset && !tree_fits_shwi_p (toffset)))
330 decl = NULL;
331 else
333 offset += bitpos / BITS_PER_UNIT;
334 if (toffset)
335 offset += tree_to_shwi (toffset);
337 break;
341 if (decl
342 && mode == GET_MODE (x)
343 && TREE_CODE (decl) == VAR_DECL
344 && (TREE_STATIC (decl)
345 || DECL_THREAD_LOCAL_P (decl))
346 && DECL_RTL_SET_P (decl)
347 && MEM_P (DECL_RTL (decl)))
349 rtx newx;
351 offset += MEM_OFFSET (x);
353 newx = DECL_RTL (decl);
355 if (MEM_P (newx))
357 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
359 /* Avoid creating a new MEM needlessly if we already had
360 the same address. We do if there's no OFFSET and the
361 old address X is identical to NEWX, or if X is of the
362 form (plus NEWX OFFSET), or the NEWX is of the form
363 (plus Y (const_int Z)) and X is that with the offset
364 added: (plus Y (const_int Z+OFFSET)). */
365 if (!((offset == 0
366 || (GET_CODE (o) == PLUS
367 && GET_CODE (XEXP (o, 1)) == CONST_INT
368 && (offset == INTVAL (XEXP (o, 1))
369 || (GET_CODE (n) == PLUS
370 && GET_CODE (XEXP (n, 1)) == CONST_INT
371 && (INTVAL (XEXP (n, 1)) + offset
372 == INTVAL (XEXP (o, 1)))
373 && (n = XEXP (n, 0))))
374 && (o = XEXP (o, 0))))
375 && rtx_equal_p (o, n)))
376 x = adjust_address_nv (newx, mode, offset);
378 else if (GET_MODE (x) == GET_MODE (newx)
379 && offset == 0)
380 x = newx;
384 return x;
387 /* Make a unary operation by first seeing if it folds and otherwise making
388 the specified operation. */
391 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
392 machine_mode op_mode)
394 rtx tem;
396 /* If this simplifies, use it. */
397 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
398 return tem;
400 return gen_rtx_fmt_e (code, mode, op);
403 /* Likewise for ternary operations. */
406 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
407 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
409 rtx tem;
411 /* If this simplifies, use it. */
412 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
413 op0, op1, op2)))
414 return tem;
416 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
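/* Illustrative usage sketch, not part of the original file (name
   hypothetical; A and B are assumed to be SImode rvalues): an
   IF_THEN_ELSE whose condition is a constant selects one arm outright.  */
static inline rtx
simplify_gen_ternary_sketch (rtx a, rtx b)
{
  /* const1_rtx is nonzero, so this returns A unchanged.  */
  return simplify_gen_ternary (IF_THEN_ELSE, SImode, VOIDmode,
			       const1_rtx, a, b);
}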
419 /* Likewise, for relational operations.
420 CMP_MODE specifies mode comparison is done in. */
423 simplify_gen_relational (enum rtx_code code, machine_mode mode,
424 machine_mode cmp_mode, rtx op0, rtx op1)
426 rtx tem;
428 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
429 op0, op1)))
430 return tem;
432 return gen_rtx_fmt_ee (code, mode, op0, op1);
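/* Illustrative usage sketch, not part of the original file (name
   hypothetical): comparing a non-volatile SImode register with itself
   folds to the "true" constant.  */
static inline rtx
simplify_gen_relational_sketch (rtx reg)
{
  /* Yields const_true_rtx (const1_rtx when STORE_FLAG_VALUE is 1).  */
  return simplify_gen_relational (EQ, SImode, SImode, reg, reg);
}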
435 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
436 and simplify the result. If FN is non-NULL, call this callback on each
437 X, if it returns non-NULL, replace X with its return value and simplify the
438 result. */
441 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
442 rtx (*fn) (rtx, const_rtx, void *), void *data)
444 enum rtx_code code = GET_CODE (x);
445 machine_mode mode = GET_MODE (x);
446 machine_mode op_mode;
447 const char *fmt;
448 rtx op0, op1, op2, newx, op;
449 rtvec vec, newvec;
450 int i, j;
452 if (__builtin_expect (fn != NULL, 0))
454 newx = fn (x, old_rtx, data);
455 if (newx)
456 return newx;
458 else if (rtx_equal_p (x, old_rtx))
459 return copy_rtx ((rtx) data);
461 switch (GET_RTX_CLASS (code))
463 case RTX_UNARY:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0))
468 return x;
469 return simplify_gen_unary (code, mode, op0, op_mode);
471 case RTX_BIN_ARITH:
472 case RTX_COMM_ARITH:
473 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
474 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
475 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
476 return x;
477 return simplify_gen_binary (code, mode, op0, op1);
479 case RTX_COMPARE:
480 case RTX_COMM_COMPARE:
481 op0 = XEXP (x, 0);
482 op1 = XEXP (x, 1);
483 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
484 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
485 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
486 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
487 return x;
488 return simplify_gen_relational (code, mode, op_mode, op0, op1);
490 case RTX_TERNARY:
491 case RTX_BITFIELD_OPS:
492 op0 = XEXP (x, 0);
493 op_mode = GET_MODE (op0);
494 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
495 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
496 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
497 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
498 return x;
499 if (op_mode == VOIDmode)
500 op_mode = GET_MODE (op0);
501 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
503 case RTX_EXTRA:
504 if (code == SUBREG)
506 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
507 if (op0 == SUBREG_REG (x))
508 return x;
509 op0 = simplify_gen_subreg (GET_MODE (x), op0,
510 GET_MODE (SUBREG_REG (x)),
511 SUBREG_BYTE (x));
512 return op0 ? op0 : x;
514 break;
516 case RTX_OBJ:
517 if (code == MEM)
519 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
520 if (op0 == XEXP (x, 0))
521 return x;
522 return replace_equiv_address_nv (x, op0);
524 else if (code == LO_SUM)
526 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
527 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
529 /* (lo_sum (high x) y) -> y where x and y have the same base. */
530 if (GET_CODE (op0) == HIGH)
532 rtx base0, base1, offset0, offset1;
533 split_const (XEXP (op0, 0), &base0, &offset0);
534 split_const (op1, &base1, &offset1);
535 if (rtx_equal_p (base0, base1))
536 return op1;
539 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
540 return x;
541 return gen_rtx_LO_SUM (mode, op0, op1);
543 break;
545 default:
546 break;
549 newx = x;
550 fmt = GET_RTX_FORMAT (code);
551 for (i = 0; fmt[i]; i++)
552 switch (fmt[i])
554 case 'E':
555 vec = XVEC (x, i);
556 newvec = XVEC (newx, i);
557 for (j = 0; j < GET_NUM_ELEM (vec); j++)
559 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
560 old_rtx, fn, data);
561 if (op != RTVEC_ELT (vec, j))
563 if (newvec == vec)
565 newvec = shallow_copy_rtvec (vec);
566 if (x == newx)
567 newx = shallow_copy_rtx (x);
568 XVEC (newx, i) = newvec;
570 RTVEC_ELT (newvec, j) = op;
573 break;
575 case 'e':
576 if (XEXP (x, i))
578 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
579 if (op != XEXP (x, i))
581 if (x == newx)
582 newx = shallow_copy_rtx (x);
583 XEXP (newx, i) = op;
586 break;
588 return newx;
591 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
592 resulting RTX. Return a new RTX which is as simplified as possible. */
595 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
597 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
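/* Illustrative usage sketch, not part of the original file (register
   numbers and name are hypothetical): substitute (reg:SI 2) for
   (reg:SI 1) inside an expression and resimplify.  */
static inline rtx
simplify_replace_rtx_sketch (void)
{
  rtx old_reg = gen_rtx_REG (SImode, 1);
  rtx new_reg = gen_rtx_REG (SImode, 2);
  rtx expr = gen_rtx_PLUS (SImode, old_reg, GEN_INT (4));
  /* Yields (plus:SI (reg:SI 2) (const_int 4)).  */
  return simplify_replace_rtx (expr, old_reg, new_reg);
}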
600 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
601 Only handle cases where the truncated value is inherently an rvalue.
603 RTL provides two ways of truncating a value:
605 1. a lowpart subreg. This form is only a truncation when both
606 the outer and inner modes (here MODE and OP_MODE respectively)
607 are scalar integers, and only then when the subreg is used as
608 an rvalue.
610 It is only valid to form such truncating subregs if the
611 truncation requires no action by the target. The onus for
612 proving this is on the creator of the subreg -- e.g. the
613 caller to simplify_subreg or simplify_gen_subreg -- and typically
614 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
616 2. a TRUNCATE. This form handles both scalar and compound integers.
618 The first form is preferred where valid. However, the TRUNCATE
619 handling in simplify_unary_operation turns the second form into the
620 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
621 so it is generally safe to form rvalue truncations using:
623 simplify_gen_unary (TRUNCATE, ...)
625 and leave simplify_unary_operation to work out which representation
626 should be used.
628 Because of the proof requirements on (1), simplify_truncation must
629 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
630 regardless of whether the outer truncation came from a SUBREG or a
631 TRUNCATE. For example, if the caller has proven that an SImode
632 truncation of:
634 (and:DI X Y)
636 is a no-op and can be represented as a subreg, it does not follow
637 that SImode truncations of X and Y are also no-ops. On a target
638 like 64-bit MIPS that requires SImode values to be stored in
639 sign-extended form, an SImode truncation of:
641 (and:DI (reg:DI X) (const_int 63))
643 is trivially a no-op because only the lower 6 bits can be set.
644 However, X is still an arbitrary 64-bit number and so we cannot
645 assume that truncating it too is a no-op. */
647 static rtx
648 simplify_truncation (machine_mode mode, rtx op,
649 machine_mode op_mode)
651 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
652 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
653 gcc_assert (precision <= op_precision);
655 /* Optimize truncations of zero and sign extended values. */
656 if (GET_CODE (op) == ZERO_EXTEND
657 || GET_CODE (op) == SIGN_EXTEND)
659 /* There are three possibilities. If MODE is the same as the
660 origmode, we can omit both the extension and the subreg.
661 If MODE is not larger than the origmode, we can apply the
662 truncation without the extension. Finally, if the outermode
663 is larger than the origmode, we can just extend to the appropriate
664 mode. */
665 machine_mode origmode = GET_MODE (XEXP (op, 0));
666 if (mode == origmode)
667 return XEXP (op, 0);
668 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
669 return simplify_gen_unary (TRUNCATE, mode,
670 XEXP (op, 0), origmode);
671 else
672 return simplify_gen_unary (GET_CODE (op), mode,
673 XEXP (op, 0), origmode);
676 /* If the machine can perform operations in the truncated mode, distribute
677 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
678 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
679 if (1
680 #ifdef WORD_REGISTER_OPERATIONS
681 && precision >= BITS_PER_WORD
682 #endif
683 && (GET_CODE (op) == PLUS
684 || GET_CODE (op) == MINUS
685 || GET_CODE (op) == MULT))
687 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
688 if (op0)
690 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
691 if (op1)
692 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
696 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
697 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
698 the outer subreg is effectively a truncation to the original mode. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 /* Ensure that OP_MODE is at least twice as wide as MODE
702 to avoid the possibility that an outer LSHIFTRT shifts by more
703 than the sign extension's sign_bit_copies and introduces zeros
704 into the high bits of the result. */
705 && 2 * precision <= op_precision
706 && CONST_INT_P (XEXP (op, 1))
707 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
708 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
709 && UINTVAL (XEXP (op, 1)) < precision)
710 return simplify_gen_binary (ASHIFTRT, mode,
711 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
713 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
714 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
715 the outer subreg is effectively a truncation to the original mode. */
716 if ((GET_CODE (op) == LSHIFTRT
717 || GET_CODE (op) == ASHIFTRT)
718 && CONST_INT_P (XEXP (op, 1))
719 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
721 && UINTVAL (XEXP (op, 1)) < precision)
722 return simplify_gen_binary (LSHIFTRT, mode,
723 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
725 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
726 (ashift:QI (x:QI) C), where C is a suitable small constant and
727 the outer subreg is effectively a truncation to the original mode. */
728 if (GET_CODE (op) == ASHIFT
729 && CONST_INT_P (XEXP (op, 1))
730 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
731 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
732 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
733 && UINTVAL (XEXP (op, 1)) < precision)
734 return simplify_gen_binary (ASHIFT, mode,
735 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
737 /* Recognize a word extraction from a multi-word subreg. */
738 if ((GET_CODE (op) == LSHIFTRT
739 || GET_CODE (op) == ASHIFTRT)
740 && SCALAR_INT_MODE_P (mode)
741 && SCALAR_INT_MODE_P (op_mode)
742 && precision >= BITS_PER_WORD
743 && 2 * precision <= op_precision
744 && CONST_INT_P (XEXP (op, 1))
745 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
746 && UINTVAL (XEXP (op, 1)) < op_precision)
748 int byte = subreg_lowpart_offset (mode, op_mode);
749 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
750 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
751 (WORDS_BIG_ENDIAN
752 ? byte - shifted_bytes
753 : byte + shifted_bytes));
756 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
757 and try replacing the TRUNCATE and shift with it. Don't do this
758 if the MEM has a mode-dependent address. */
759 if ((GET_CODE (op) == LSHIFTRT
760 || GET_CODE (op) == ASHIFTRT)
761 && SCALAR_INT_MODE_P (op_mode)
762 && MEM_P (XEXP (op, 0))
763 && CONST_INT_P (XEXP (op, 1))
764 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
765 && INTVAL (XEXP (op, 1)) > 0
766 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
767 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
768 MEM_ADDR_SPACE (XEXP (op, 0)))
769 && ! MEM_VOLATILE_P (XEXP (op, 0))
770 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
771 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
773 int byte = subreg_lowpart_offset (mode, op_mode);
774 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
775 return adjust_address_nv (XEXP (op, 0), mode,
776 (WORDS_BIG_ENDIAN
777 ? byte - shifted_bytes
778 : byte + shifted_bytes));
781 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
782 (OP:SI foo:SI) if OP is NEG or ABS. */
783 if ((GET_CODE (op) == ABS
784 || GET_CODE (op) == NEG)
785 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
786 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
787 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
788 return simplify_gen_unary (GET_CODE (op), mode,
789 XEXP (XEXP (op, 0), 0), mode);
791 /* (truncate:A (subreg:B (truncate:C X) 0)) is
792 (truncate:A X). */
793 if (GET_CODE (op) == SUBREG
794 && SCALAR_INT_MODE_P (mode)
795 && SCALAR_INT_MODE_P (op_mode)
796 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
797 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
798 && subreg_lowpart_p (op))
800 rtx inner = XEXP (SUBREG_REG (op), 0);
801 if (GET_MODE_PRECISION (mode)
802 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
803 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
804 else
805 /* If subreg above is paradoxical and C is narrower
806 than A, return (subreg:A (truncate:C X) 0). */
807 return simplify_gen_subreg (mode, SUBREG_REG (op),
808 GET_MODE (SUBREG_REG (op)), 0);
811 /* (truncate:A (truncate:B X)) is (truncate:A X). */
812 if (GET_CODE (op) == TRUNCATE)
813 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
814 GET_MODE (XEXP (op, 0)));
816 return NULL_RTX;
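/* Illustrative sketch of the recommendation in the comment before
   simplify_truncation; not part of the original file and the name is
   hypothetical.  Rvalue truncations are best formed through
   simplify_gen_unary (TRUNCATE, ...), which chooses between a TRUNCATE
   rtx and a lowpart subreg as appropriate for the target.  */
static inline rtx
gen_rvalue_truncation_sketch (rtx op)
{
  /* OP is assumed to be a DImode rvalue.  */
  return simplify_gen_unary (TRUNCATE, SImode, op, DImode);
}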
819 /* Try to simplify a unary operation CODE whose output mode is to be
820 MODE with input operand OP whose mode was originally OP_MODE.
821 Return zero if no simplification can be made. */
823 simplify_unary_operation (enum rtx_code code, machine_mode mode,
824 rtx op, machine_mode op_mode)
826 rtx trueop, tem;
828 trueop = avoid_constant_pool_reference (op);
830 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
831 if (tem)
832 return tem;
834 return simplify_unary_operation_1 (code, mode, op);
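/* Illustrative usage sketch, not part of the original file (name
   hypothetical): a double NOT of a non-constant SImode value collapses
   back to the original operand via the NOT case handled below.  */
static inline rtx
double_not_sketch (rtx x)
{
  rtx inner = simplify_gen_unary (NOT, SImode, x, SImode);
  /* (not (not X)) simplifies to X.  */
  return simplify_gen_unary (NOT, SImode, inner, SImode);
}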
837 /* Perform some simplifications we can do even if the operands
838 aren't constant. */
839 static rtx
840 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
842 enum rtx_code reversed;
843 rtx temp;
845 switch (code)
847 case NOT:
848 /* (not (not X)) == X. */
849 if (GET_CODE (op) == NOT)
850 return XEXP (op, 0);
852 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
853 comparison is all ones. */
854 if (COMPARISON_P (op)
855 && (mode == BImode || STORE_FLAG_VALUE == -1)
856 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
857 return simplify_gen_relational (reversed, mode, VOIDmode,
858 XEXP (op, 0), XEXP (op, 1));
860 /* (not (plus X -1)) can become (neg X). */
861 if (GET_CODE (op) == PLUS
862 && XEXP (op, 1) == constm1_rtx)
863 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
865 /* Similarly, (not (neg X)) is (plus X -1). */
866 if (GET_CODE (op) == NEG)
867 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
868 CONSTM1_RTX (mode));
870 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
871 if (GET_CODE (op) == XOR
872 && CONST_INT_P (XEXP (op, 1))
873 && (temp = simplify_unary_operation (NOT, mode,
874 XEXP (op, 1), mode)) != 0)
875 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
877 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
878 if (GET_CODE (op) == PLUS
879 && CONST_INT_P (XEXP (op, 1))
880 && mode_signbit_p (mode, XEXP (op, 1))
881 && (temp = simplify_unary_operation (NOT, mode,
882 XEXP (op, 1), mode)) != 0)
883 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
886 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
887 operands other than 1, but that is not valid. We could do a
888 similar simplification for (not (lshiftrt C X)) where C is
889 just the sign bit, but this doesn't seem common enough to
890 bother with. */
891 if (GET_CODE (op) == ASHIFT
892 && XEXP (op, 0) == const1_rtx)
894 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
895 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
898 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
899 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
900 so we can perform the above simplification. */
901 if (STORE_FLAG_VALUE == -1
902 && GET_CODE (op) == ASHIFTRT
903 && CONST_INT_P (XEXP (op, 1))
904 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
905 return simplify_gen_relational (GE, mode, VOIDmode,
906 XEXP (op, 0), const0_rtx);
909 if (GET_CODE (op) == SUBREG
910 && subreg_lowpart_p (op)
911 && (GET_MODE_SIZE (GET_MODE (op))
912 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
913 && GET_CODE (SUBREG_REG (op)) == ASHIFT
914 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
916 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
917 rtx x;
919 x = gen_rtx_ROTATE (inner_mode,
920 simplify_gen_unary (NOT, inner_mode, const1_rtx,
921 inner_mode),
922 XEXP (SUBREG_REG (op), 1));
923 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
924 if (temp)
925 return temp;
928 /* Apply De Morgan's laws to reduce number of patterns for machines
929 with negating logical insns (and-not, nand, etc.). If result has
930 only one NOT, put it first, since that is how the patterns are
931 coded. */
932 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
934 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
935 machine_mode op_mode;
937 op_mode = GET_MODE (in1);
938 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
940 op_mode = GET_MODE (in2);
941 if (op_mode == VOIDmode)
942 op_mode = mode;
943 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
945 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
946 std::swap (in1, in2);
948 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
949 mode, in1, in2);
952 /* (not (bswap x)) -> (bswap (not x)). */
953 if (GET_CODE (op) == BSWAP)
955 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
956 return simplify_gen_unary (BSWAP, mode, x, mode);
958 break;
960 case NEG:
961 /* (neg (neg X)) == X. */
962 if (GET_CODE (op) == NEG)
963 return XEXP (op, 0);
965 /* (neg (plus X 1)) can become (not X). */
966 if (GET_CODE (op) == PLUS
967 && XEXP (op, 1) == const1_rtx)
968 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
970 /* Similarly, (neg (not X)) is (plus X 1). */
971 if (GET_CODE (op) == NOT)
972 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
973 CONST1_RTX (mode));
975 /* (neg (minus X Y)) can become (minus Y X). This transformation
976 isn't safe for modes with signed zeros, since if X and Y are
977 both +0, (minus Y X) is the same as (minus X Y). If the
978 rounding mode is towards +infinity (or -infinity) then the two
979 expressions will be rounded differently. */
980 if (GET_CODE (op) == MINUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
983 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
985 if (GET_CODE (op) == PLUS
986 && !HONOR_SIGNED_ZEROS (mode)
987 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
989 /* (neg (plus A C)) is simplified to (minus -C A). */
990 if (CONST_SCALAR_INT_P (XEXP (op, 1))
991 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
993 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
994 if (temp)
995 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
998 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
999 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1000 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1003 /* (neg (mult A B)) becomes (mult A (neg B)).
1004 This works even for floating-point values. */
1005 if (GET_CODE (op) == MULT
1006 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1008 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1009 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1012 /* NEG commutes with ASHIFT since it is multiplication. Only do
1013 this if we can then eliminate the NEG (e.g., if the operand
1014 is a constant). */
1015 if (GET_CODE (op) == ASHIFT)
1017 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1018 if (temp)
1019 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1022 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1023 C is equal to the width of MODE minus 1. */
1024 if (GET_CODE (op) == ASHIFTRT
1025 && CONST_INT_P (XEXP (op, 1))
1026 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1027 return simplify_gen_binary (LSHIFTRT, mode,
1028 XEXP (op, 0), XEXP (op, 1));
1030 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1031 C is equal to the width of MODE minus 1. */
1032 if (GET_CODE (op) == LSHIFTRT
1033 && CONST_INT_P (XEXP (op, 1))
1034 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1035 return simplify_gen_binary (ASHIFTRT, mode,
1036 XEXP (op, 0), XEXP (op, 1));
1038 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1039 if (GET_CODE (op) == XOR
1040 && XEXP (op, 1) == const1_rtx
1041 && nonzero_bits (XEXP (op, 0), mode) == 1)
1042 return plus_constant (mode, XEXP (op, 0), -1);
1044 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1045 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1046 if (GET_CODE (op) == LT
1047 && XEXP (op, 1) == const0_rtx
1048 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1050 machine_mode inner = GET_MODE (XEXP (op, 0));
1051 int isize = GET_MODE_PRECISION (inner);
1052 if (STORE_FLAG_VALUE == 1)
1054 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1055 GEN_INT (isize - 1));
1056 if (mode == inner)
1057 return temp;
1058 if (GET_MODE_PRECISION (mode) > isize)
1059 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1060 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1062 else if (STORE_FLAG_VALUE == -1)
1064 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1065 GEN_INT (isize - 1));
1066 if (mode == inner)
1067 return temp;
1068 if (GET_MODE_PRECISION (mode) > isize)
1069 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1070 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1073 break;
1075 case TRUNCATE:
1076 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1077 with the umulXi3_highpart patterns. */
1078 if (GET_CODE (op) == LSHIFTRT
1079 && GET_CODE (XEXP (op, 0)) == MULT)
1080 break;
1082 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1084 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1086 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1087 if (temp)
1088 return temp;
1090 /* We can't handle truncation to a partial integer mode here
1091 because we don't know the real bitsize of the partial
1092 integer mode. */
1093 break;
1096 if (GET_MODE (op) != VOIDmode)
1098 temp = simplify_truncation (mode, op, GET_MODE (op));
1099 if (temp)
1100 return temp;
1103 /* If we know that the value is already truncated, we can
1104 replace the TRUNCATE with a SUBREG. */
1105 if (GET_MODE_NUNITS (mode) == 1
1106 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1107 || truncated_to_mode (mode, op)))
1109 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1110 if (temp)
1111 return temp;
1114 /* A truncate of a comparison can be replaced with a subreg if
1115 STORE_FLAG_VALUE permits. This is like the previous test,
1116 but it works even if the comparison is done in a mode larger
1117 than HOST_BITS_PER_WIDE_INT. */
1118 if (HWI_COMPUTABLE_MODE_P (mode)
1119 && COMPARISON_P (op)
1120 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1122 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1123 if (temp)
1124 return temp;
1127 /* A truncate of a memory is just loading the low part of the memory
1128 if we are not changing the meaning of the address. */
1129 if (GET_CODE (op) == MEM
1130 && !VECTOR_MODE_P (mode)
1131 && !MEM_VOLATILE_P (op)
1132 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1134 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1135 if (temp)
1136 return temp;
1139 break;
1141 case FLOAT_TRUNCATE:
1142 if (DECIMAL_FLOAT_MODE_P (mode))
1143 break;
1145 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1146 if (GET_CODE (op) == FLOAT_EXTEND
1147 && GET_MODE (XEXP (op, 0)) == mode)
1148 return XEXP (op, 0);
1150 /* (float_truncate:SF (float_truncate:DF foo:XF))
1151 = (float_truncate:SF foo:XF).
1152 This may eliminate double rounding, so it is unsafe.
1154 (float_truncate:SF (float_extend:XF foo:DF))
1155 = (float_truncate:SF foo:DF).
1157 (float_truncate:DF (float_extend:XF foo:SF))
1158 = (float_extend:DF foo:SF). */
1159 if ((GET_CODE (op) == FLOAT_TRUNCATE
1160 && flag_unsafe_math_optimizations)
1161 || GET_CODE (op) == FLOAT_EXTEND)
1162 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1163 0)))
1164 > GET_MODE_SIZE (mode)
1165 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1166 mode,
1167 XEXP (op, 0), mode);
1169 /* (float_truncate (float x)) is (float x) */
1170 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1171 && (flag_unsafe_math_optimizations
1172 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1173 && ((unsigned)significand_size (GET_MODE (op))
1174 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1175 - num_sign_bit_copies (XEXP (op, 0),
1176 GET_MODE (XEXP (op, 0))))))))
1177 return simplify_gen_unary (GET_CODE (op), mode,
1178 XEXP (op, 0),
1179 GET_MODE (XEXP (op, 0)));
1181 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1182 (OP:SF foo:SF) if OP is NEG or ABS. */
1183 if ((GET_CODE (op) == ABS
1184 || GET_CODE (op) == NEG)
1185 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1186 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1187 return simplify_gen_unary (GET_CODE (op), mode,
1188 XEXP (XEXP (op, 0), 0), mode);
1190 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1191 is (float_truncate:SF x). */
1192 if (GET_CODE (op) == SUBREG
1193 && subreg_lowpart_p (op)
1194 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1195 return SUBREG_REG (op);
1196 break;
1198 case FLOAT_EXTEND:
1199 if (DECIMAL_FLOAT_MODE_P (mode))
1200 break;
1202 /* (float_extend (float_extend x)) is (float_extend x)
1204 (float_extend (float x)) is (float x) assuming that double
1205 rounding can't happen.  */
1207 if (GET_CODE (op) == FLOAT_EXTEND
1208 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1209 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1210 && ((unsigned)significand_size (GET_MODE (op))
1211 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1212 - num_sign_bit_copies (XEXP (op, 0),
1213 GET_MODE (XEXP (op, 0)))))))
1214 return simplify_gen_unary (GET_CODE (op), mode,
1215 XEXP (op, 0),
1216 GET_MODE (XEXP (op, 0)));
1218 break;
1220 case ABS:
1221 /* (abs (neg <foo>)) -> (abs <foo>) */
1222 if (GET_CODE (op) == NEG)
1223 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1224 GET_MODE (XEXP (op, 0)));
1226 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1227 do nothing. */
1228 if (GET_MODE (op) == VOIDmode)
1229 break;
1231 /* If operand is something known to be positive, ignore the ABS. */
1232 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1233 || val_signbit_known_clear_p (GET_MODE (op),
1234 nonzero_bits (op, GET_MODE (op))))
1235 return op;
1237 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1238 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1239 return gen_rtx_NEG (mode, op);
1241 break;
1243 case FFS:
1244 /* (ffs (*_extend <X>)) = (ffs <X>) */
1245 if (GET_CODE (op) == SIGN_EXTEND
1246 || GET_CODE (op) == ZERO_EXTEND)
1247 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1248 GET_MODE (XEXP (op, 0)));
1249 break;
1251 case POPCOUNT:
1252 switch (GET_CODE (op))
1254 case BSWAP:
1255 case ZERO_EXTEND:
1256 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1257 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1258 GET_MODE (XEXP (op, 0)));
1260 case ROTATE:
1261 case ROTATERT:
1262 /* Rotations don't affect popcount. */
1263 if (!side_effects_p (XEXP (op, 1)))
1264 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1265 GET_MODE (XEXP (op, 0)));
1266 break;
1268 default:
1269 break;
1271 break;
1273 case PARITY:
1274 switch (GET_CODE (op))
1276 case NOT:
1277 case BSWAP:
1278 case ZERO_EXTEND:
1279 case SIGN_EXTEND:
1280 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1281 GET_MODE (XEXP (op, 0)));
1283 case ROTATE:
1284 case ROTATERT:
1285 /* Rotations don't affect parity. */
1286 if (!side_effects_p (XEXP (op, 1)))
1287 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1288 GET_MODE (XEXP (op, 0)));
1289 break;
1291 default:
1292 break;
1294 break;
1296 case BSWAP:
1297 /* (bswap (bswap x)) -> x. */
1298 if (GET_CODE (op) == BSWAP)
1299 return XEXP (op, 0);
1300 break;
1302 case FLOAT:
1303 /* (float (sign_extend <X>)) = (float <X>). */
1304 if (GET_CODE (op) == SIGN_EXTEND)
1305 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1306 GET_MODE (XEXP (op, 0)));
1307 break;
1309 case SIGN_EXTEND:
1310 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1311 becomes just the MINUS if its mode is MODE. This allows
1312 folding switch statements on machines using casesi (such as
1313 the VAX). */
1314 if (GET_CODE (op) == TRUNCATE
1315 && GET_MODE (XEXP (op, 0)) == mode
1316 && GET_CODE (XEXP (op, 0)) == MINUS
1317 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1318 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1319 return XEXP (op, 0);
1321 /* Extending a widening multiplication should be canonicalized to
1322 a wider widening multiplication. */
1323 if (GET_CODE (op) == MULT)
1325 rtx lhs = XEXP (op, 0);
1326 rtx rhs = XEXP (op, 1);
1327 enum rtx_code lcode = GET_CODE (lhs);
1328 enum rtx_code rcode = GET_CODE (rhs);
1330 /* Widening multiplies usually extend both operands, but sometimes
1331 they use a shift to extract a portion of a register. */
1332 if ((lcode == SIGN_EXTEND
1333 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1334 && (rcode == SIGN_EXTEND
1335 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1337 machine_mode lmode = GET_MODE (lhs);
1338 machine_mode rmode = GET_MODE (rhs);
1339 int bits;
1341 if (lcode == ASHIFTRT)
1342 /* Number of bits not shifted off the end. */
1343 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1344 else /* lcode == SIGN_EXTEND */
1345 /* Size of inner mode. */
1346 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1348 if (rcode == ASHIFTRT)
1349 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1350 else /* rcode == SIGN_EXTEND */
1351 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1353 /* We can only widen multiplies if the result is mathematically
1354 equivalent. I.e. if overflow was impossible. */
1355 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1356 return simplify_gen_binary
1357 (MULT, mode,
1358 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1359 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1363 /* Check for a sign extension of a subreg of a promoted
1364 variable, where the promotion is sign-extended, and the
1365 target mode is the same as the variable's promotion. */
1366 if (GET_CODE (op) == SUBREG
1367 && SUBREG_PROMOTED_VAR_P (op)
1368 && SUBREG_PROMOTED_SIGNED_P (op)
1369 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1371 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1372 if (temp)
1373 return temp;
1376 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1377 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1378 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1380 gcc_assert (GET_MODE_PRECISION (mode)
1381 > GET_MODE_PRECISION (GET_MODE (op)));
1382 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1383 GET_MODE (XEXP (op, 0)));
1386 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1387 is (sign_extend:M (subreg:O <X>)) if there is mode with
1388 GET_MODE_BITSIZE (N) - I bits.
1389 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1390 is similarly (zero_extend:M (subreg:O <X>)). */
1391 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1392 && GET_CODE (XEXP (op, 0)) == ASHIFT
1393 && CONST_INT_P (XEXP (op, 1))
1394 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1395 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1397 machine_mode tmode
1398 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1399 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1400 gcc_assert (GET_MODE_BITSIZE (mode)
1401 > GET_MODE_BITSIZE (GET_MODE (op)));
1402 if (tmode != BLKmode)
1404 rtx inner =
1405 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1406 if (inner)
1407 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1408 ? SIGN_EXTEND : ZERO_EXTEND,
1409 mode, inner, tmode);
1413 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1414 /* As we do not know which address space the pointer is referring to,
1415 we can do this only if the target does not support different pointer
1416 or address modes depending on the address space. */
1417 if (target_default_pointer_address_modes_p ()
1418 && ! POINTERS_EXTEND_UNSIGNED
1419 && mode == Pmode && GET_MODE (op) == ptr_mode
1420 && (CONSTANT_P (op)
1421 || (GET_CODE (op) == SUBREG
1422 && REG_P (SUBREG_REG (op))
1423 && REG_POINTER (SUBREG_REG (op))
1424 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1425 return convert_memory_address (Pmode, op);
1426 #endif
1427 break;
1429 case ZERO_EXTEND:
1430 /* Check for a zero extension of a subreg of a promoted
1431 variable, where the promotion is zero-extended, and the
1432 target mode is the same as the variable's promotion. */
1433 if (GET_CODE (op) == SUBREG
1434 && SUBREG_PROMOTED_VAR_P (op)
1435 && SUBREG_PROMOTED_UNSIGNED_P (op)
1436 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1438 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1439 if (temp)
1440 return temp;
1443 /* Extending a widening multiplication should be canonicalized to
1444 a wider widening multiplication. */
1445 if (GET_CODE (op) == MULT)
1447 rtx lhs = XEXP (op, 0);
1448 rtx rhs = XEXP (op, 1);
1449 enum rtx_code lcode = GET_CODE (lhs);
1450 enum rtx_code rcode = GET_CODE (rhs);
1452 /* Widening multiplies usually extend both operands, but sometimes
1453 they use a shift to extract a portion of a register. */
1454 if ((lcode == ZERO_EXTEND
1455 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1456 && (rcode == ZERO_EXTEND
1457 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1459 machine_mode lmode = GET_MODE (lhs);
1460 machine_mode rmode = GET_MODE (rhs);
1461 int bits;
1463 if (lcode == LSHIFTRT)
1464 /* Number of bits not shifted off the end. */
1465 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1466 else /* lcode == ZERO_EXTEND */
1467 /* Size of inner mode. */
1468 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1470 if (rcode == LSHIFTRT)
1471 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1472 else /* rcode == ZERO_EXTEND */
1473 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1475 /* We can only widen multiplies if the result is mathematically
1476 equivalent. I.e. if overflow was impossible. */
1477 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1478 return simplify_gen_binary
1479 (MULT, mode,
1480 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1481 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1485 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1486 if (GET_CODE (op) == ZERO_EXTEND)
1487 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1488 GET_MODE (XEXP (op, 0)));
1490 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1491 is (zero_extend:M (subreg:O <X>)) if there is mode with
1492 GET_MODE_PRECISION (N) - I bits. */
1493 if (GET_CODE (op) == LSHIFTRT
1494 && GET_CODE (XEXP (op, 0)) == ASHIFT
1495 && CONST_INT_P (XEXP (op, 1))
1496 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1497 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1499 machine_mode tmode
1500 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1501 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1502 if (tmode != BLKmode)
1504 rtx inner =
1505 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1506 if (inner)
1507 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1511 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1512 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1513 of mode N. E.g.
1514 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1515 (and:SI (reg:SI) (const_int 63)). */
1516 if (GET_CODE (op) == SUBREG
1517 && GET_MODE_PRECISION (GET_MODE (op))
1518 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1519 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1520 <= HOST_BITS_PER_WIDE_INT
1521 && GET_MODE_PRECISION (mode)
1522 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1523 && subreg_lowpart_p (op)
1524 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1525 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1527 if (GET_MODE_PRECISION (mode)
1528 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1529 return SUBREG_REG (op);
1530 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1531 GET_MODE (SUBREG_REG (op)));
1534 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1535 /* As we do not know which address space the pointer is referring to,
1536 we can do this only if the target does not support different pointer
1537 or address modes depending on the address space. */
1538 if (target_default_pointer_address_modes_p ()
1539 && POINTERS_EXTEND_UNSIGNED > 0
1540 && mode == Pmode && GET_MODE (op) == ptr_mode
1541 && (CONSTANT_P (op)
1542 || (GET_CODE (op) == SUBREG
1543 && REG_P (SUBREG_REG (op))
1544 && REG_POINTER (SUBREG_REG (op))
1545 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1546 return convert_memory_address (Pmode, op);
1547 #endif
1548 break;
1550 default:
1551 break;
1554 return 0;
1557 /* Try to compute the value of a unary operation CODE whose output mode is to
1558 be MODE with input operand OP whose mode was originally OP_MODE.
1559 Return zero if the value cannot be computed. */
1561 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1562 rtx op, machine_mode op_mode)
1564 unsigned int width = GET_MODE_PRECISION (mode);
1566 if (code == VEC_DUPLICATE)
1568 gcc_assert (VECTOR_MODE_P (mode));
1569 if (GET_MODE (op) != VOIDmode)
1571 if (!VECTOR_MODE_P (GET_MODE (op)))
1572 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1573 else
1574 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1575 (GET_MODE (op)));
1577 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1578 || GET_CODE (op) == CONST_VECTOR)
1580 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1581 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1582 rtvec v = rtvec_alloc (n_elts);
1583 unsigned int i;
1585 if (GET_CODE (op) != CONST_VECTOR)
1586 for (i = 0; i < n_elts; i++)
1587 RTVEC_ELT (v, i) = op;
1588 else
1590 machine_mode inmode = GET_MODE (op);
1591 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1592 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1594 gcc_assert (in_n_elts < n_elts);
1595 gcc_assert ((n_elts % in_n_elts) == 0);
1596 for (i = 0; i < n_elts; i++)
1597 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1599 return gen_rtx_CONST_VECTOR (mode, v);
1603 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1605 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1606 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1607 machine_mode opmode = GET_MODE (op);
1608 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1609 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1610 rtvec v = rtvec_alloc (n_elts);
1611 unsigned int i;
1613 gcc_assert (op_n_elts == n_elts);
1614 for (i = 0; i < n_elts; i++)
1616 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1617 CONST_VECTOR_ELT (op, i),
1618 GET_MODE_INNER (opmode));
1619 if (!x)
1620 return 0;
1621 RTVEC_ELT (v, i) = x;
1623 return gen_rtx_CONST_VECTOR (mode, v);
1626 /* The order of these tests is critical so that, for example, we don't
1627 check the wrong mode (input vs. output) for a conversion operation,
1628 such as FIX. At some point, this should be simplified. */
1630 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1632 REAL_VALUE_TYPE d;
1634 if (op_mode == VOIDmode)
1636 /* CONST_INTs have VOIDmode as their mode. We assume that all
1637 the bits of the constant are significant, though this is
1638 a dangerous assumption as many times CONST_INTs are
1639 created and used with garbage in the bits outside of the
1640 precision of the implied mode of the const_int. */
1641 op_mode = MAX_MODE_INT;
1644 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1645 d = real_value_truncate (mode, d);
1646 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1648 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1650 REAL_VALUE_TYPE d;
1652 if (op_mode == VOIDmode)
1654 /* CONST_INTs have VOIDmode as their mode. We assume that all
1655 the bits of the constant are significant, though this is
1656 a dangerous assumption as many times CONST_INTs are
1657 created and used with garbage in the bits outside of the
1658 precision of the implied mode of the const_int. */
1659 op_mode = MAX_MODE_INT;
1662 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1663 d = real_value_truncate (mode, d);
1664 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1667 if (CONST_SCALAR_INT_P (op) && width > 0)
1669 wide_int result;
1670 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1671 rtx_mode_t op0 = std::make_pair (op, imode);
1672 int int_value;
1674 #if TARGET_SUPPORTS_WIDE_INT == 0
1675 /* This assert keeps the simplification from producing a result
1676 that cannot be represented in a CONST_DOUBLE but a lot of
1677 upstream callers expect that this function never fails to
1678 simplify something, and so if you added this to the test
1679 above, the code would die later anyway. If this assert
1680 happens, you just need to make the port support wide int. */
1681 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1682 #endif
1684 switch (code)
1686 case NOT:
1687 result = wi::bit_not (op0);
1688 break;
1690 case NEG:
1691 result = wi::neg (op0);
1692 break;
1694 case ABS:
1695 result = wi::abs (op0);
1696 break;
1698 case FFS:
1699 result = wi::shwi (wi::ffs (op0), mode);
1700 break;
1702 case CLZ:
1703 if (wi::ne_p (op0, 0))
1704 int_value = wi::clz (op0);
1705 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1706 int_value = GET_MODE_PRECISION (mode);
1707 result = wi::shwi (int_value, mode);
1708 break;
1710 case CLRSB:
1711 result = wi::shwi (wi::clrsb (op0), mode);
1712 break;
1714 case CTZ:
1715 if (wi::ne_p (op0, 0))
1716 int_value = wi::ctz (op0);
1717 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1718 int_value = GET_MODE_PRECISION (mode);
1719 result = wi::shwi (int_value, mode);
1720 break;
1722 case POPCOUNT:
1723 result = wi::shwi (wi::popcount (op0), mode);
1724 break;
1726 case PARITY:
1727 result = wi::shwi (wi::parity (op0), mode);
1728 break;
1730 case BSWAP:
1731 result = wide_int (op0).bswap ();
1732 break;
1734 case TRUNCATE:
1735 case ZERO_EXTEND:
1736 result = wide_int::from (op0, width, UNSIGNED);
1737 break;
1739 case SIGN_EXTEND:
1740 result = wide_int::from (op0, width, SIGNED);
1741 break;
1743 case SQRT:
1744 default:
1745 return 0;
1748 return immed_wide_int_const (result, mode);
1751 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1752 && SCALAR_FLOAT_MODE_P (mode)
1753 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1755 REAL_VALUE_TYPE d;
1756 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1758 switch (code)
1760 case SQRT:
1761 return 0;
1762 case ABS:
1763 d = real_value_abs (&d);
1764 break;
1765 case NEG:
1766 d = real_value_negate (&d);
1767 break;
1768 case FLOAT_TRUNCATE:
1769 d = real_value_truncate (mode, d);
1770 break;
1771 case FLOAT_EXTEND:
1772 /* All this does is change the mode, unless changing
1773 mode class. */
1774 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1775 real_convert (&d, mode, &d);
1776 break;
1777 case FIX:
1778 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1779 break;
1780 case NOT:
1782 long tmp[4];
1783 int i;
1785 real_to_target (tmp, &d, GET_MODE (op));
1786 for (i = 0; i < 4; i++)
1787 tmp[i] = ~tmp[i];
1788 real_from_target (&d, tmp, mode);
1789 break;
1791 default:
1792 gcc_unreachable ();
1794 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1796 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1797 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1798 && GET_MODE_CLASS (mode) == MODE_INT
1799 && width > 0)
1801 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1802 operators are intentionally left unspecified (to ease implementation
1803 by target backends), for consistency, this routine implements the
1804 same semantics for constant folding as used by the middle-end. */
1806 /* This was formerly used only for non-IEEE float.
1807 eggert@twinsun.com says it is safe for IEEE also. */
1808 REAL_VALUE_TYPE x, t;
1809 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1810 wide_int wmax, wmin;
1811 /* This is part of the interface of real_to_integer, but we check
1812 things before making this call. */
1813 bool fail;
1815 switch (code)
1817 case FIX:
1818 if (REAL_VALUE_ISNAN (x))
1819 return const0_rtx;
1821 /* Test against the signed upper bound. */
1822 wmax = wi::max_value (width, SIGNED);
1823 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1824 if (REAL_VALUES_LESS (t, x))
1825 return immed_wide_int_const (wmax, mode);
1827 /* Test against the signed lower bound. */
1828 wmin = wi::min_value (width, SIGNED);
1829 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1830 if (REAL_VALUES_LESS (x, t))
1831 return immed_wide_int_const (wmin, mode);
1833 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1834 break;
1836 case UNSIGNED_FIX:
1837 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1838 return const0_rtx;
1840 /* Test against the unsigned upper bound. */
1841 wmax = wi::max_value (width, UNSIGNED);
1842 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1843 if (REAL_VALUES_LESS (t, x))
1844 return immed_wide_int_const (wmax, mode);
1846 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1847 mode);
1848 break;
1850 default:
1851 gcc_unreachable ();
1855 return NULL_RTX;
1858 /* Subroutine of simplify_binary_operation to simplify a binary operation
1859 CODE that can commute with byte swapping, with result mode MODE and
1860 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1861 Return zero if no simplification or canonicalization is possible. */
1863 static rtx
1864 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1865 rtx op0, rtx op1)
1867 rtx tem;
1869 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
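/* For example, (and:SI (bswap:SI x) (const_int 0xff)) can become
   (bswap:SI (and:SI x (const_int 0xff000000))): bswapping the constant
   moves the mask onto the bytes it actually selects.  */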
1870 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1872 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1873 simplify_gen_unary (BSWAP, mode, op1, mode));
1874 return simplify_gen_unary (BSWAP, mode, tem, mode);
1877 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1878 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1880 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1881 return simplify_gen_unary (BSWAP, mode, tem, mode);
1884 return NULL_RTX;
1887 /* Subroutine of simplify_binary_operation to simplify a commutative,
1888 associative binary operation CODE with result mode MODE, operating
1889 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1890 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1891 canonicalization is possible. */
1893 static rtx
1894 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1895 rtx op0, rtx op1)
1897 rtx tem;
1899 /* Linearize the operator to the left. */
1900 if (GET_CODE (op1) == code)
1902 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1903 if (GET_CODE (op0) == code)
1905 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1906 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1909 /* "a op (b op c)" becomes "(b op c) op a". */
1910 if (! swap_commutative_operands_p (op1, op0))
1911 return simplify_gen_binary (code, mode, op1, op0);
1913 std::swap (op0, op1);
1916 if (GET_CODE (op0) == code)
1918 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1919 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1921 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1922 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1925 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1926 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1927 if (tem != 0)
1928 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1930 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1931 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1932 if (tem != 0)
1933 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1936 return 0;
1940 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1941 and OP1. Return 0 if no simplification is possible.
1943 Don't use this for relational operations such as EQ or LT.
1944 Use simplify_relational_operation instead. */
1946 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1947 rtx op0, rtx op1)
1949 rtx trueop0, trueop1;
1950 rtx tem;
1952 /* Relational operations don't work here. We must know the mode
1953 of the operands in order to do the comparison correctly.
1954 Assuming a full word can give incorrect results.
1955 Consider comparing 128 with -128 in QImode. */
1956 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1957 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1959 /* Make sure the constant is second. */
1960 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1961 && swap_commutative_operands_p (op0, op1))
1962 std::swap (op0, op1);
1964 trueop0 = avoid_constant_pool_reference (op0);
1965 trueop1 = avoid_constant_pool_reference (op1);
1967 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1968 if (tem)
1969 return tem;
1970 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1973 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1974 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1975 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1976 actual constants. */
1978 static rtx
1979 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1980 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1982 rtx tem, reversed, opleft, opright;
1983 HOST_WIDE_INT val;
1984 unsigned int width = GET_MODE_PRECISION (mode);
1986 /* Even if we can't compute a constant result,
1987 there are some cases worth simplifying. */
1989 switch (code)
1991 case PLUS:
1992 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1993 when x is NaN, infinite, or finite and nonzero. They aren't
1994 when x is -0 and the rounding mode is not towards -infinity,
1995 since (-0) + 0 is then 0. */
1996 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1997 return op0;
1999 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2000 transformations are safe even for IEEE. */
2001 if (GET_CODE (op0) == NEG)
2002 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2003 else if (GET_CODE (op1) == NEG)
2004 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2006 /* (~a) + 1 -> -a */
2007 if (INTEGRAL_MODE_P (mode)
2008 && GET_CODE (op0) == NOT
2009 && trueop1 == const1_rtx)
2010 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2012 /* Handle both-operands-constant cases. We can only add
2013 CONST_INTs to constants since the sum of relocatable symbols
2014 can't be handled by most assemblers. Don't add CONST_INT
2015 to CONST_INT since overflow won't be computed properly if wider
2016 than HOST_BITS_PER_WIDE_INT. */
2018 if ((GET_CODE (op0) == CONST
2019 || GET_CODE (op0) == SYMBOL_REF
2020 || GET_CODE (op0) == LABEL_REF)
2021 && CONST_INT_P (op1))
2022 return plus_constant (mode, op0, INTVAL (op1));
2023 else if ((GET_CODE (op1) == CONST
2024 || GET_CODE (op1) == SYMBOL_REF
2025 || GET_CODE (op1) == LABEL_REF)
2026 && CONST_INT_P (op0))
2027 return plus_constant (mode, op1, INTVAL (op0));
2029 /* See if this is something like X * C - X or vice versa or
2030 if the multiplication is written as a shift. If so, we can
2031 distribute and make a new multiply, shift, or maybe just
2032 have X (if C is 2 in the example above). But don't make
2033 something more expensive than we had before. */
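/* For example, (plus (ashift x 2) x) is x*4 + x*1 and can become
   (mult x 5), provided the multiply is no costlier than the original.  */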
2035 if (SCALAR_INT_MODE_P (mode))
2037 rtx lhs = op0, rhs = op1;
2039 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2040 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2042 if (GET_CODE (lhs) == NEG)
2044 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2045 lhs = XEXP (lhs, 0);
2047 else if (GET_CODE (lhs) == MULT
2048 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2050 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2051 lhs = XEXP (lhs, 0);
2053 else if (GET_CODE (lhs) == ASHIFT
2054 && CONST_INT_P (XEXP (lhs, 1))
2055 && INTVAL (XEXP (lhs, 1)) >= 0
2056 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2058 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2059 GET_MODE_PRECISION (mode));
2060 lhs = XEXP (lhs, 0);
2063 if (GET_CODE (rhs) == NEG)
2065 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2066 rhs = XEXP (rhs, 0);
2068 else if (GET_CODE (rhs) == MULT
2069 && CONST_INT_P (XEXP (rhs, 1)))
2071 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2072 rhs = XEXP (rhs, 0);
2074 else if (GET_CODE (rhs) == ASHIFT
2075 && CONST_INT_P (XEXP (rhs, 1))
2076 && INTVAL (XEXP (rhs, 1)) >= 0
2077 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2079 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2080 GET_MODE_PRECISION (mode));
2081 rhs = XEXP (rhs, 0);
2084 if (rtx_equal_p (lhs, rhs))
2086 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2087 rtx coeff;
2088 bool speed = optimize_function_for_speed_p (cfun);
2090 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2092 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2093 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2094 ? tem : 0;
2098 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
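/* E.g. in QImode, where 0x80 is the sign bit, (plus (xor x 0x70) 0x80)
   becomes (xor x 0xf0): adding the sign bit merely flips it, because
   the carry out of the top bit is discarded.  */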
2099 if (CONST_SCALAR_INT_P (op1)
2100 && GET_CODE (op0) == XOR
2101 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2102 && mode_signbit_p (mode, op1))
2103 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2104 simplify_gen_binary (XOR, mode, op1,
2105 XEXP (op0, 1)));
2107 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2108 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2109 && GET_CODE (op0) == MULT
2110 && GET_CODE (XEXP (op0, 0)) == NEG)
2112 rtx in1, in2;
2114 in1 = XEXP (XEXP (op0, 0), 0);
2115 in2 = XEXP (op0, 1);
2116 return simplify_gen_binary (MINUS, mode, op1,
2117 simplify_gen_binary (MULT, mode,
2118 in1, in2));
2121 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2122 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2123 is 1. */
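/* E.g. with STORE_FLAG_VALUE == 1, (lt a b) is 1 or 0, so
   (plus (lt a b) (const_int -1)) is 0 or -1, i.e. (neg (ge a b)).  */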
2124 if (COMPARISON_P (op0)
2125 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2126 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2127 && (reversed = reversed_comparison (op0, mode)))
2128 return
2129 simplify_gen_unary (NEG, mode, reversed, mode);
2131 /* If one of the operands is a PLUS or a MINUS, see if we can
2132 simplify this by the associative law.
2133 Don't use the associative law for floating point.
2134 The inaccuracy makes it nonassociative,
2135 and subtle programs can break if operations are associated. */
2137 if (INTEGRAL_MODE_P (mode)
2138 && (plus_minus_operand_p (op0)
2139 || plus_minus_operand_p (op1))
2140 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2141 return tem;
2143 /* Reassociate floating point addition only when the user
2144 specifies associative math operations. */
2145 if (FLOAT_MODE_P (mode)
2146 && flag_associative_math)
2148 tem = simplify_associative_operation (code, mode, op0, op1);
2149 if (tem)
2150 return tem;
2152 break;
2154 case COMPARE:
2155 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2156 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2157 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2158 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2160 rtx xop00 = XEXP (op0, 0);
2161 rtx xop10 = XEXP (op1, 0);
2163 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2164 return xop00;
2166 if (REG_P (xop00) && REG_P (xop10)
2167 && GET_MODE (xop00) == GET_MODE (xop10)
2168 && REGNO (xop00) == REGNO (xop10)
2169 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2170 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2171 return xop00;
2173 break;
2175 case MINUS:
2176 /* We can't assume x-x is 0 even with non-IEEE floating point,
2177 but since it is zero except in very strange circumstances, we
2178 will treat it as zero with -ffinite-math-only. */
2179 if (rtx_equal_p (trueop0, trueop1)
2180 && ! side_effects_p (op0)
2181 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2182 return CONST0_RTX (mode);
2184 /* Change subtraction from zero into negation. (0 - x) is the
2185 same as -x when x is NaN, infinite, or finite and nonzero.
2186 But if the mode has signed zeros, and does not round towards
2187 -infinity, then 0 - 0 is 0, not -0. */
2188 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2189 return simplify_gen_unary (NEG, mode, op1, mode);
2191 /* (-1 - a) is ~a. */
2192 if (trueop0 == constm1_rtx)
2193 return simplify_gen_unary (NOT, mode, op1, mode);
2195 /* Subtracting 0 has no effect unless the mode has signed zeros
2196 and supports rounding towards -infinity. In such a case,
2197 0 - 0 is -0. */
2198 if (!(HONOR_SIGNED_ZEROS (mode)
2199 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2200 && trueop1 == CONST0_RTX (mode))
2201 return op0;
2203 /* See if this is something like X * C - X or vice versa or
2204 if the multiplication is written as a shift. If so, we can
2205 distribute and make a new multiply, shift, or maybe just
2206 have X (if C is 2 in the example above). But don't make
2207 something more expensive than we had before. */
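/* For example, (minus (ashift x 3) x) is x*8 - x*1 and can become
   (mult x 7), again only if that is not more expensive.  */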
2209 if (SCALAR_INT_MODE_P (mode))
2211 rtx lhs = op0, rhs = op1;
2213 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2214 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2216 if (GET_CODE (lhs) == NEG)
2218 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2219 lhs = XEXP (lhs, 0);
2221 else if (GET_CODE (lhs) == MULT
2222 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2224 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2225 lhs = XEXP (lhs, 0);
2227 else if (GET_CODE (lhs) == ASHIFT
2228 && CONST_INT_P (XEXP (lhs, 1))
2229 && INTVAL (XEXP (lhs, 1)) >= 0
2230 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2232 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2233 GET_MODE_PRECISION (mode));
2234 lhs = XEXP (lhs, 0);
2237 if (GET_CODE (rhs) == NEG)
2239 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2240 rhs = XEXP (rhs, 0);
2242 else if (GET_CODE (rhs) == MULT
2243 && CONST_INT_P (XEXP (rhs, 1)))
2245 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2246 rhs = XEXP (rhs, 0);
2248 else if (GET_CODE (rhs) == ASHIFT
2249 && CONST_INT_P (XEXP (rhs, 1))
2250 && INTVAL (XEXP (rhs, 1)) >= 0
2251 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2253 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2254 GET_MODE_PRECISION (mode));
2255 negcoeff1 = -negcoeff1;
2256 rhs = XEXP (rhs, 0);
2259 if (rtx_equal_p (lhs, rhs))
2261 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2262 rtx coeff;
2263 bool speed = optimize_function_for_speed_p (cfun);
2265 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2267 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2268 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2269 ? tem : 0;
2273 /* (a - (-b)) -> (a + b). True even for IEEE. */
2274 if (GET_CODE (op1) == NEG)
2275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2277 /* (-x - c) may be simplified as (-c - x). */
2278 if (GET_CODE (op0) == NEG
2279 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2281 tem = simplify_unary_operation (NEG, mode, op1, mode);
2282 if (tem)
2283 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2286 /* Don't let a relocatable value get a negative coeff. */
2287 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2288 return simplify_gen_binary (PLUS, mode,
2289 op0,
2290 neg_const_int (mode, op1));
2292 /* (x - (x & y)) -> (x & ~y) */
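/* Every bit set in (x & y) is also set in x, so the subtraction never
   borrows; it merely clears the bits selected by y, giving x & ~y.  */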
2293 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2295 if (rtx_equal_p (op0, XEXP (op1, 0)))
2297 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2298 GET_MODE (XEXP (op1, 1)));
2299 return simplify_gen_binary (AND, mode, op0, tem);
2301 if (rtx_equal_p (op0, XEXP (op1, 1)))
2303 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2304 GET_MODE (XEXP (op1, 0)));
2305 return simplify_gen_binary (AND, mode, op0, tem);
2309 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2310 by reversing the comparison code if valid. */
2311 if (STORE_FLAG_VALUE == 1
2312 && trueop0 == const1_rtx
2313 && COMPARISON_P (op1)
2314 && (reversed = reversed_comparison (op1, mode)))
2315 return reversed;
2317 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2318 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2319 && GET_CODE (op1) == MULT
2320 && GET_CODE (XEXP (op1, 0)) == NEG)
2322 rtx in1, in2;
2324 in1 = XEXP (XEXP (op1, 0), 0);
2325 in2 = XEXP (op1, 1);
2326 return simplify_gen_binary (PLUS, mode,
2327 simplify_gen_binary (MULT, mode,
2328 in1, in2),
2329 op0);
2332 /* Canonicalize (minus (neg A) (mult B C)) to
2333 (minus (mult (neg B) C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2335 && GET_CODE (op1) == MULT
2336 && GET_CODE (op0) == NEG)
2338 rtx in1, in2;
2340 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2341 in2 = XEXP (op1, 1);
2342 return simplify_gen_binary (MINUS, mode,
2343 simplify_gen_binary (MULT, mode,
2344 in1, in2),
2345 XEXP (op0, 0));
2348 /* If one of the operands is a PLUS or a MINUS, see if we can
2349 simplify this by the associative law. This will, for example,
2350 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2351 Don't use the associative law for floating point.
2352 The inaccuracy makes it nonassociative,
2353 and subtle programs can break if operations are associated. */
2355 if (INTEGRAL_MODE_P (mode)
2356 && (plus_minus_operand_p (op0)
2357 || plus_minus_operand_p (op1))
2358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2359 return tem;
2360 break;
2362 case MULT:
2363 if (trueop1 == constm1_rtx)
2364 return simplify_gen_unary (NEG, mode, op0, mode);
2366 if (GET_CODE (op0) == NEG)
2368 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2369 /* If op1 is a MULT as well and simplify_unary_operation
2370 just moved the NEG to the second operand, simplify_gen_binary
2371 below could, through simplify_associative_operation, move
2372 the NEG around again and recurse endlessly. */
2373 if (temp
2374 && GET_CODE (op1) == MULT
2375 && GET_CODE (temp) == MULT
2376 && XEXP (op1, 0) == XEXP (temp, 0)
2377 && GET_CODE (XEXP (temp, 1)) == NEG
2378 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2379 temp = NULL_RTX;
2380 if (temp)
2381 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2383 if (GET_CODE (op1) == NEG)
2385 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2386 /* If op0 is a MULT as well and simplify_unary_operation
2387 just moved the NEG to the second operand, simplify_gen_binary
2388 below could, through simplify_associative_operation, move
2389 the NEG around again and recurse endlessly. */
2390 if (temp
2391 && GET_CODE (op0) == MULT
2392 && GET_CODE (temp) == MULT
2393 && XEXP (op0, 0) == XEXP (temp, 0)
2394 && GET_CODE (XEXP (temp, 1)) == NEG
2395 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2396 temp = NULL_RTX;
2397 if (temp)
2398 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2401 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2402 x is NaN, since x * 0 is then also NaN. Nor is it valid
2403 when the mode has signed zeros, since multiplying a negative
2404 number by 0 will give -0, not 0. */
2405 if (!HONOR_NANS (mode)
2406 && !HONOR_SIGNED_ZEROS (mode)
2407 && trueop1 == CONST0_RTX (mode)
2408 && ! side_effects_p (op0))
2409 return op1;
2411 /* In IEEE floating point, x*1 is not equivalent to x for
2412 signalling NaNs. */
2413 if (!HONOR_SNANS (mode)
2414 && trueop1 == CONST1_RTX (mode))
2415 return op0;
2417 /* Convert multiply by constant power of two into shift. */
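/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)).  */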
2418 if (CONST_SCALAR_INT_P (trueop1))
2420 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2421 if (val >= 0)
2422 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2425 /* x*2 is x+x and x*(-1) is -x */
2426 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2428 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2429 && GET_MODE (op0) == mode)
2431 REAL_VALUE_TYPE d;
2432 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2434 if (REAL_VALUES_EQUAL (d, dconst2))
2435 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2437 if (!HONOR_SNANS (mode)
2438 && REAL_VALUES_EQUAL (d, dconstm1))
2439 return simplify_gen_unary (NEG, mode, op0, mode);
2442 /* Optimize -x * -x as x * x. */
2443 if (FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == NEG
2445 && GET_CODE (op1) == NEG
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2450 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2451 if (SCALAR_FLOAT_MODE_P (mode)
2452 && GET_CODE (op0) == ABS
2453 && GET_CODE (op1) == ABS
2454 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2455 && !side_effects_p (XEXP (op0, 0)))
2456 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2458 /* Reassociate multiplication, but for floating point MULTs
2459 only when the user specifies unsafe math optimizations. */
2460 if (! FLOAT_MODE_P (mode)
2461 || flag_unsafe_math_optimizations)
2463 tem = simplify_associative_operation (code, mode, op0, op1);
2464 if (tem)
2465 return tem;
2467 break;
2469 case IOR:
2470 if (trueop1 == CONST0_RTX (mode))
2471 return op0;
2472 if (INTEGRAL_MODE_P (mode)
2473 && trueop1 == CONSTM1_RTX (mode)
2474 && !side_effects_p (op0))
2475 return op1;
2476 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2477 return op0;
2478 /* A | (~A) -> -1 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && SCALAR_INT_MODE_P (mode))
2483 return constm1_rtx;
2485 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
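/* E.g. (ior (and x 0x0f) 0xff) folds to (const_int 0xff), because every
   bit that (and x 0x0f) could set is already set in the constant.  */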
2486 if (CONST_INT_P (op1)
2487 && HWI_COMPUTABLE_MODE_P (mode)
2488 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2489 && !side_effects_p (op0))
2490 return op1;
2492 /* Canonicalize (X & C1) | C2. */
2493 if (GET_CODE (op0) == AND
2494 && CONST_INT_P (trueop1)
2495 && CONST_INT_P (XEXP (op0, 1)))
2497 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2498 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2499 HOST_WIDE_INT c2 = INTVAL (trueop1);
2501 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2502 if ((c1 & c2) == c1
2503 && !side_effects_p (XEXP (op0, 0)))
2504 return trueop1;
2506 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2507 if (((c1|c2) & mask) == mask)
2508 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2510 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2511 if (((c1 & ~c2) & mask) != (c1 & mask))
2513 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2514 gen_int_mode (c1 & ~c2, mode));
2515 return simplify_gen_binary (IOR, mode, tem, op1);
2519 /* Convert (A & B) | A to A. */
2520 if (GET_CODE (op0) == AND
2521 && (rtx_equal_p (XEXP (op0, 0), op1)
2522 || rtx_equal_p (XEXP (op0, 1), op1))
2523 && ! side_effects_p (XEXP (op0, 0))
2524 && ! side_effects_p (XEXP (op0, 1)))
2525 return op1;
2527 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2528 mode size to (rotate A CX). */
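/* E.g. in SImode, (ior (ashift a 24) (lshiftrt a 8)) becomes
   (rotate a 24), since 24 + 8 equals the 32-bit mode size.  */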
2530 if (GET_CODE (op1) == ASHIFT
2531 || GET_CODE (op1) == SUBREG)
2533 opleft = op1;
2534 opright = op0;
2536 else
2538 opright = op1;
2539 opleft = op0;
2542 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2543 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2544 && CONST_INT_P (XEXP (opleft, 1))
2545 && CONST_INT_P (XEXP (opright, 1))
2546 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2547 == GET_MODE_PRECISION (mode)))
2548 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2550 /* Same, but for ashift that has been "simplified" to a wider mode
2551 by simplify_shift_const. */
2553 if (GET_CODE (opleft) == SUBREG
2554 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2555 && GET_CODE (opright) == LSHIFTRT
2556 && GET_CODE (XEXP (opright, 0)) == SUBREG
2557 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2558 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2559 && (GET_MODE_SIZE (GET_MODE (opleft))
2560 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2561 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2562 SUBREG_REG (XEXP (opright, 0)))
2563 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2564 && CONST_INT_P (XEXP (opright, 1))
2565 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2566 == GET_MODE_PRECISION (mode)))
2567 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2568 XEXP (SUBREG_REG (opleft), 1));
2570 /* If we have (ior (and X C1) C2), simplify this by making
2571 C1 as small as possible if C1 actually changes. */
2572 if (CONST_INT_P (op1)
2573 && (HWI_COMPUTABLE_MODE_P (mode)
2574 || INTVAL (op1) > 0)
2575 && GET_CODE (op0) == AND
2576 && CONST_INT_P (XEXP (op0, 1))
2577 && CONST_INT_P (op1)
2578 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2580 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2581 gen_int_mode (UINTVAL (XEXP (op0, 1))
2582 & ~UINTVAL (op1),
2583 mode));
2584 return simplify_gen_binary (IOR, mode, tmp, op1);
2587 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2588 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2589 the PLUS does not affect any of the bits in OP1: then we can do
2590 the IOR as a PLUS and we can associate. This is valid if OP1
2591 can be safely shifted left C bits. */
2592 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2593 && GET_CODE (XEXP (op0, 0)) == PLUS
2594 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2595 && CONST_INT_P (XEXP (op0, 1))
2596 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2598 int count = INTVAL (XEXP (op0, 1));
2599 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2601 if (mask >> count == INTVAL (trueop1)
2602 && trunc_int_for_mode (mask, mode) == mask
2603 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2604 return simplify_gen_binary (ASHIFTRT, mode,
2605 plus_constant (mode, XEXP (op0, 0),
2606 mask),
2607 XEXP (op0, 1));
2610 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2611 if (tem)
2612 return tem;
2614 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617 break;
2619 case XOR:
2620 if (trueop1 == CONST0_RTX (mode))
2621 return op0;
2622 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2623 return simplify_gen_unary (NOT, mode, op0, mode);
2624 if (rtx_equal_p (trueop0, trueop1)
2625 && ! side_effects_p (op0)
2626 && GET_MODE_CLASS (mode) != MODE_CC)
2627 return CONST0_RTX (mode);
2629 /* Canonicalize XOR of the most significant bit to PLUS. */
2630 if (CONST_SCALAR_INT_P (op1)
2631 && mode_signbit_p (mode, op1))
2632 return simplify_gen_binary (PLUS, mode, op0, op1);
2633 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2634 if (CONST_SCALAR_INT_P (op1)
2635 && GET_CODE (op0) == PLUS
2636 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2637 && mode_signbit_p (mode, XEXP (op0, 1)))
2638 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2639 simplify_gen_binary (XOR, mode, op1,
2640 XEXP (op0, 1)));
2642 /* If we are XORing two things that have no bits in common,
2643 convert them into an IOR. This helps to detect rotation encoded
2644 using those methods and possibly enables other simplifications. */
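/* When no bit position can be set in both operands, XOR never cancels
   a bit, so A ^ B equals A | B.  */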
2646 if (HWI_COMPUTABLE_MODE_P (mode)
2647 && (nonzero_bits (op0, mode)
2648 & nonzero_bits (op1, mode)) == 0)
2649 return (simplify_gen_binary (IOR, mode, op0, op1));
2651 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2652 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2653 (NOT y). */
2655 int num_negated = 0;
2657 if (GET_CODE (op0) == NOT)
2658 num_negated++, op0 = XEXP (op0, 0);
2659 if (GET_CODE (op1) == NOT)
2660 num_negated++, op1 = XEXP (op1, 0);
2662 if (num_negated == 2)
2663 return simplify_gen_binary (XOR, mode, op0, op1);
2664 else if (num_negated == 1)
2665 return simplify_gen_unary (NOT, mode,
2666 simplify_gen_binary (XOR, mode, op0, op1),
2667 mode);
2670 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2671 correspond to a machine insn or result in further simplifications
2672 if B is a constant. */
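/* Bit by bit: where B is 0 both sides are 0; where B is 1, (A & B) ^ B
   inverts A's bit.  Hence (xor (and A B) B) equals (and (not A) B).  */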
2674 if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 1), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 0), mode),
2680 op1);
2682 else if (GET_CODE (op0) == AND
2683 && rtx_equal_p (XEXP (op0, 0), op1)
2684 && ! side_effects_p (op1))
2685 return simplify_gen_binary (AND, mode,
2686 simplify_gen_unary (NOT, mode,
2687 XEXP (op0, 1), mode),
2688 op1);
2690 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2691 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2692 out bits inverted twice and not set by C. Similarly, given
2693 (xor (and (xor A B) C) D), simplify without inverting C in
2694 the xor operand: (xor (and A C) (B&C)^D).  */
2696 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2697 && GET_CODE (XEXP (op0, 0)) == XOR
2698 && CONST_INT_P (op1)
2699 && CONST_INT_P (XEXP (op0, 1))
2700 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2702 enum rtx_code op = GET_CODE (op0);
2703 rtx a = XEXP (XEXP (op0, 0), 0);
2704 rtx b = XEXP (XEXP (op0, 0), 1);
2705 rtx c = XEXP (op0, 1);
2706 rtx d = op1;
2707 HOST_WIDE_INT bval = INTVAL (b);
2708 HOST_WIDE_INT cval = INTVAL (c);
2709 HOST_WIDE_INT dval = INTVAL (d);
2710 HOST_WIDE_INT xcval;
2712 if (op == IOR)
2713 xcval = ~cval;
2714 else
2715 xcval = cval;
2717 return simplify_gen_binary (XOR, mode,
2718 simplify_gen_binary (op, mode, a, c),
2719 gen_int_mode ((bval & xcval) ^ dval,
2720 mode));
2723 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2724 we can transform like this:
2725 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2726 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2727 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2728 Attempt a few simplifications when B and C are both constants. */
2729 if (GET_CODE (op0) == AND
2730 && CONST_INT_P (op1)
2731 && CONST_INT_P (XEXP (op0, 1)))
2733 rtx a = XEXP (op0, 0);
2734 rtx b = XEXP (op0, 1);
2735 rtx c = op1;
2736 HOST_WIDE_INT bval = INTVAL (b);
2737 HOST_WIDE_INT cval = INTVAL (c);
2739 /* Instead of computing ~A&C, we compute its negated value,
2740 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2741 optimize for sure. If it does not simplify, we still try
2742 to compute ~A&C below, but since that always allocates
2743 RTL, we don't try that before committing to returning a
2744 simplified expression. */
2745 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2746 GEN_INT (~cval));
2748 if ((~cval & bval) == 0)
2750 rtx na_c = NULL_RTX;
2751 if (n_na_c)
2752 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2753 else
2755 /* If ~A does not simplify, don't bother: we don't
2756 want to simplify 2 operations into 3, and if na_c
2757 were to simplify with na, n_na_c would have
2758 simplified as well. */
2759 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2760 if (na)
2761 na_c = simplify_gen_binary (AND, mode, na, c);
2764 /* Try to simplify ~A&C | ~B&C. */
2765 if (na_c != NULL_RTX)
2766 return simplify_gen_binary (IOR, mode, na_c,
2767 gen_int_mode (~bval & cval, mode));
2769 else
2771 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2772 if (n_na_c == CONSTM1_RTX (mode))
2774 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2775 gen_int_mode (~cval & bval,
2776 mode));
2777 return simplify_gen_binary (IOR, mode, a_nc_b,
2778 gen_int_mode (~bval & cval,
2779 mode));
2784 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2785 comparison if STORE_FLAG_VALUE is 1. */
2786 if (STORE_FLAG_VALUE == 1
2787 && trueop1 == const1_rtx
2788 && COMPARISON_P (op0)
2789 && (reversed = reversed_comparison (op0, mode)))
2790 return reversed;
2792 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2793 is (lt foo (const_int 0)), so we can perform the above
2794 simplification if STORE_FLAG_VALUE is 1. */
2796 if (STORE_FLAG_VALUE == 1
2797 && trueop1 == const1_rtx
2798 && GET_CODE (op0) == LSHIFTRT
2799 && CONST_INT_P (XEXP (op0, 1))
2800 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2801 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2803 /* (xor (comparison foo bar) (const_int sign-bit))
2804 when STORE_FLAG_VALUE is the sign bit. */
2805 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2806 && trueop1 == const_true_rtx
2807 && COMPARISON_P (op0)
2808 && (reversed = reversed_comparison (op0, mode)))
2809 return reversed;
2811 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2812 if (tem)
2813 return tem;
2815 tem = simplify_associative_operation (code, mode, op0, op1);
2816 if (tem)
2817 return tem;
2818 break;
2820 case AND:
2821 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2822 return trueop1;
2823 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2824 return op0;
2825 if (HWI_COMPUTABLE_MODE_P (mode))
2827 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2828 HOST_WIDE_INT nzop1;
2829 if (CONST_INT_P (trueop1))
2831 HOST_WIDE_INT val1 = INTVAL (trueop1);
2832 /* If we are turning off bits already known off in OP0, we need
2833 not do an AND. */
2834 if ((nzop0 & ~val1) == 0)
2835 return op0;
2837 nzop1 = nonzero_bits (trueop1, mode);
2838 /* If we are clearing all the nonzero bits, the result is zero. */
2839 if ((nzop1 & nzop0) == 0
2840 && !side_effects_p (op0) && !side_effects_p (op1))
2841 return CONST0_RTX (mode);
2843 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2844 && GET_MODE_CLASS (mode) != MODE_CC)
2845 return op0;
2846 /* A & (~A) -> 0 */
2847 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2848 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2849 && ! side_effects_p (op0)
2850 && GET_MODE_CLASS (mode) != MODE_CC)
2851 return CONST0_RTX (mode);
2853 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2854 there are no nonzero bits of C outside of X's mode. */
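/* E.g. (and:SI (sign_extend:SI (reg:QI x)) (const_int 0x7c)) can become
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7c))), because the
   mask clears every bit the extension could have set.  */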
2855 if ((GET_CODE (op0) == SIGN_EXTEND
2856 || GET_CODE (op0) == ZERO_EXTEND)
2857 && CONST_INT_P (trueop1)
2858 && HWI_COMPUTABLE_MODE_P (mode)
2859 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2860 & UINTVAL (trueop1)) == 0)
2862 machine_mode imode = GET_MODE (XEXP (op0, 0));
2863 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2864 gen_int_mode (INTVAL (trueop1),
2865 imode));
2866 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2869 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2870 we might be able to further simplify the AND with X and potentially
2871 remove the truncation altogether. */
2872 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2874 rtx x = XEXP (op0, 0);
2875 machine_mode xmode = GET_MODE (x);
2876 tem = simplify_gen_binary (AND, xmode, x,
2877 gen_int_mode (INTVAL (trueop1), xmode));
2878 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2881 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2882 if (GET_CODE (op0) == IOR
2883 && CONST_INT_P (trueop1)
2884 && CONST_INT_P (XEXP (op0, 1)))
2886 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2887 return simplify_gen_binary (IOR, mode,
2888 simplify_gen_binary (AND, mode,
2889 XEXP (op0, 0), op1),
2890 gen_int_mode (tmp, mode));
2893 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2894 insn (and may simplify more). */
2895 if (GET_CODE (op0) == XOR
2896 && rtx_equal_p (XEXP (op0, 0), op1)
2897 && ! side_effects_p (op1))
2898 return simplify_gen_binary (AND, mode,
2899 simplify_gen_unary (NOT, mode,
2900 XEXP (op0, 1), mode),
2901 op1);
2903 if (GET_CODE (op0) == XOR
2904 && rtx_equal_p (XEXP (op0, 1), op1)
2905 && ! side_effects_p (op1))
2906 return simplify_gen_binary (AND, mode,
2907 simplify_gen_unary (NOT, mode,
2908 XEXP (op0, 0), mode),
2909 op1);
2911 /* Similarly for (~(A ^ B)) & A. */
2912 if (GET_CODE (op0) == NOT
2913 && GET_CODE (XEXP (op0, 0)) == XOR
2914 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2915 && ! side_effects_p (op1))
2916 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2918 if (GET_CODE (op0) == NOT
2919 && GET_CODE (XEXP (op0, 0)) == XOR
2920 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2921 && ! side_effects_p (op1))
2922 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2924 /* Convert (A | B) & A to A. */
2925 if (GET_CODE (op0) == IOR
2926 && (rtx_equal_p (XEXP (op0, 0), op1)
2927 || rtx_equal_p (XEXP (op0, 1), op1))
2928 && ! side_effects_p (XEXP (op0, 0))
2929 && ! side_effects_p (XEXP (op0, 1)))
2930 return op1;
2932 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2933 ((A & N) + B) & M -> (A + B) & M
2934 Similarly if (N & M) == 0,
2935 ((A | N) + B) & M -> (A + B) & M
2936 and for - instead of + and/or ^ instead of |.
2937 Also, if (N & M) == 0, then
2938 (A +- N) & M -> A & M. */
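/* For example, with M == 0xff: changing bits of A above bit 7 changes A
   by a multiple of 0x100 and so cannot affect the low byte of the sum,
   hence ((A & 0xfff) + B) & 0xff and ((A | 0x100) + B) & 0xff both
   equal (A + B) & 0xff.  */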
2939 if (CONST_INT_P (trueop1)
2940 && HWI_COMPUTABLE_MODE_P (mode)
2941 && ~UINTVAL (trueop1)
2942 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2943 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2945 rtx pmop[2];
2946 int which;
2948 pmop[0] = XEXP (op0, 0);
2949 pmop[1] = XEXP (op0, 1);
2951 if (CONST_INT_P (pmop[1])
2952 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2953 return simplify_gen_binary (AND, mode, pmop[0], op1);
2955 for (which = 0; which < 2; which++)
2957 tem = pmop[which];
2958 switch (GET_CODE (tem))
2960 case AND:
2961 if (CONST_INT_P (XEXP (tem, 1))
2962 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2963 == UINTVAL (trueop1))
2964 pmop[which] = XEXP (tem, 0);
2965 break;
2966 case IOR:
2967 case XOR:
2968 if (CONST_INT_P (XEXP (tem, 1))
2969 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2970 pmop[which] = XEXP (tem, 0);
2971 break;
2972 default:
2973 break;
2977 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2979 tem = simplify_gen_binary (GET_CODE (op0), mode,
2980 pmop[0], pmop[1]);
2981 return simplify_gen_binary (code, mode, tem, op1);
2985 /* (and X (ior (not X) Y)) -> (and X Y) */
2986 if (GET_CODE (op1) == IOR
2987 && GET_CODE (XEXP (op1, 0)) == NOT
2988 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2989 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2991 /* (and (ior (not X) Y) X) -> (and X Y) */
2992 if (GET_CODE (op0) == IOR
2993 && GET_CODE (XEXP (op0, 0)) == NOT
2994 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
2995 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2997 /* (and X (ior Y (not X))) -> (and X Y) */
2998 if (GET_CODE (op1) == IOR
2999 && GET_CODE (XEXP (op1, 1)) == NOT
3000 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3001 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3003 /* (and (ior Y (not X)) X) -> (and X Y) */
3004 if (GET_CODE (op0) == IOR
3005 && GET_CODE (XEXP (op0, 1)) == NOT
3006 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3007 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3009 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3010 if (tem)
3011 return tem;
3013 tem = simplify_associative_operation (code, mode, op0, op1);
3014 if (tem)
3015 return tem;
3016 break;
3018 case UDIV:
3019 /* 0/x is 0 (or x&0 if x has side-effects). */
3020 if (trueop0 == CONST0_RTX (mode))
3022 if (side_effects_p (op1))
3023 return simplify_gen_binary (AND, mode, op1, trueop0);
3024 return trueop0;
3026 /* x/1 is x. */
3027 if (trueop1 == CONST1_RTX (mode))
3029 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3030 if (tem)
3031 return tem;
3033 /* Convert divide by power of two into shift. */
3034 if (CONST_INT_P (trueop1)
3035 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3036 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3037 break;
3039 case DIV:
3040 /* Handle floating point and integers separately. */
3041 if (SCALAR_FLOAT_MODE_P (mode))
3043 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3044 safe for modes with NaNs, since 0.0 / 0.0 will then be
3045 NaN rather than 0.0. Nor is it safe for modes with signed
3046 zeros, since dividing 0 by a negative number gives -0.0 */
3047 if (trueop0 == CONST0_RTX (mode)
3048 && !HONOR_NANS (mode)
3049 && !HONOR_SIGNED_ZEROS (mode)
3050 && ! side_effects_p (op1))
3051 return op0;
3052 /* x/1.0 is x. */
3053 if (trueop1 == CONST1_RTX (mode)
3054 && !HONOR_SNANS (mode))
3055 return op0;
3057 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3058 && trueop1 != CONST0_RTX (mode))
3060 REAL_VALUE_TYPE d;
3061 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3063 /* x/-1.0 is -x. */
3064 if (REAL_VALUES_EQUAL (d, dconstm1)
3065 && !HONOR_SNANS (mode))
3066 return simplify_gen_unary (NEG, mode, op0, mode);
3068 /* Change FP division by a constant into multiplication.
3069 Only do this with -freciprocal-math. */
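/* E.g. x / 4.0 becomes x * 0.25, with the reciprocal computed at
   compile time.  */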
3070 if (flag_reciprocal_math
3071 && !REAL_VALUES_EQUAL (d, dconst0))
3073 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3074 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3075 return simplify_gen_binary (MULT, mode, op0, tem);
3079 else if (SCALAR_INT_MODE_P (mode))
3081 /* 0/x is 0 (or x&0 if x has side-effects). */
3082 if (trueop0 == CONST0_RTX (mode)
3083 && !cfun->can_throw_non_call_exceptions)
3085 if (side_effects_p (op1))
3086 return simplify_gen_binary (AND, mode, op1, trueop0);
3087 return trueop0;
3089 /* x/1 is x. */
3090 if (trueop1 == CONST1_RTX (mode))
3092 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3093 if (tem)
3094 return tem;
3096 /* x/-1 is -x. */
3097 if (trueop1 == constm1_rtx)
3099 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3100 if (x)
3101 return simplify_gen_unary (NEG, mode, x, mode);
3104 break;
3106 case UMOD:
3107 /* 0%x is 0 (or x&0 if x has side-effects). */
3108 if (trueop0 == CONST0_RTX (mode))
3110 if (side_effects_p (op1))
3111 return simplify_gen_binary (AND, mode, op1, trueop0);
3112 return trueop0;
3114 /* x%1 is 0 (or x&0 if x has side-effects). */
3115 if (trueop1 == CONST1_RTX (mode))
3117 if (side_effects_p (op0))
3118 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3119 return CONST0_RTX (mode);
3121 /* Implement modulus by power of two as AND. */
3122 if (CONST_INT_P (trueop1)
3123 && exact_log2 (UINTVAL (trueop1)) > 0)
3124 return simplify_gen_binary (AND, mode, op0,
3125 gen_int_mode (INTVAL (op1) - 1, mode));
3126 break;
3128 case MOD:
3129 /* 0%x is 0 (or x&0 if x has side-effects). */
3130 if (trueop0 == CONST0_RTX (mode))
3132 if (side_effects_p (op1))
3133 return simplify_gen_binary (AND, mode, op1, trueop0);
3134 return trueop0;
3136 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3137 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3139 if (side_effects_p (op0))
3140 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3141 return CONST0_RTX (mode);
3143 break;
3145 case ROTATERT:
3146 case ROTATE:
3147 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3148 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3149 bitsize - 1, use the other direction of rotate, with an amount of
3150 1 .. bitsize / 2 - 1, instead. */
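/* E.g. in SImode, (rotate x 20) becomes (rotatert x 12) and
   (rotatert x 20) becomes (rotate x 12).  */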
3151 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3152 if (CONST_INT_P (trueop1)
3153 && IN_RANGE (INTVAL (trueop1),
3154 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3155 GET_MODE_PRECISION (mode) - 1))
3156 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3157 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3158 - INTVAL (trueop1)));
3159 #endif
3160 /* FALLTHRU */
3161 case ASHIFTRT:
3162 if (trueop1 == CONST0_RTX (mode))
3163 return op0;
3164 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3165 return op0;
3166 /* Rotating ~0 always results in ~0. */
3167 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3168 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3169 && ! side_effects_p (op1))
3170 return op0;
3171 /* Given:
3172 scalar modes M1, M2
3173 scalar constants c1, c2
3174 size (M2) > size (M1)
3175 c1 == size (M2) - size (M1)
3176 optimize:
3177 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3178 <low_part>)
3179 (const_int <c2>))
3180 to:
3181 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3182 <low_part>). */
3183 if (code == ASHIFTRT
3184 && !VECTOR_MODE_P (mode)
3185 && SUBREG_P (op0)
3186 && CONST_INT_P (op1)
3187 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3188 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3189 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3190 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3191 > GET_MODE_BITSIZE (mode))
3192 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3193 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3194 - GET_MODE_BITSIZE (mode)))
3195 && subreg_lowpart_p (op0))
3197 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3198 + INTVAL (op1));
3199 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3200 tmp = simplify_gen_binary (ASHIFTRT,
3201 GET_MODE (SUBREG_REG (op0)),
3202 XEXP (SUBREG_REG (op0), 0),
3203 tmp);
3204 return simplify_gen_subreg (mode, tmp, inner_mode,
3205 subreg_lowpart_offset (mode,
3206 inner_mode));
3208 canonicalize_shift:
3209 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3211 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3212 if (val != INTVAL (op1))
3213 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3215 break;
3217 case ASHIFT:
3218 case SS_ASHIFT:
3219 case US_ASHIFT:
3220 if (trueop1 == CONST0_RTX (mode))
3221 return op0;
3222 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3223 return op0;
3224 goto canonicalize_shift;
3226 case LSHIFTRT:
3227 if (trueop1 == CONST0_RTX (mode))
3228 return op0;
3229 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3230 return op0;
3231 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
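/* When CLZ of zero is defined as the mode precision (e.g. 32 for SImode)
   and C is its log2 (here 5), the shift yields 1 exactly when
   (clz X) == 32, i.e. when X is zero.  */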
3232 if (GET_CODE (op0) == CLZ
3233 && CONST_INT_P (trueop1)
3234 && STORE_FLAG_VALUE == 1
3235 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3237 machine_mode imode = GET_MODE (XEXP (op0, 0));
3238 unsigned HOST_WIDE_INT zero_val = 0;
3240 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3241 && zero_val == GET_MODE_PRECISION (imode)
3242 && INTVAL (trueop1) == exact_log2 (zero_val))
3243 return simplify_gen_relational (EQ, mode, imode,
3244 XEXP (op0, 0), const0_rtx);
3246 goto canonicalize_shift;
3248 case SMIN:
3249 if (width <= HOST_BITS_PER_WIDE_INT
3250 && mode_signbit_p (mode, trueop1)
3251 && ! side_effects_p (op0))
3252 return op1;
3253 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3254 return op0;
3255 tem = simplify_associative_operation (code, mode, op0, op1);
3256 if (tem)
3257 return tem;
3258 break;
3260 case SMAX:
3261 if (width <= HOST_BITS_PER_WIDE_INT
3262 && CONST_INT_P (trueop1)
3263 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3264 && ! side_effects_p (op0))
3265 return op1;
3266 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3267 return op0;
3268 tem = simplify_associative_operation (code, mode, op0, op1);
3269 if (tem)
3270 return tem;
3271 break;
3273 case UMIN:
3274 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3275 return op1;
3276 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3277 return op0;
3278 tem = simplify_associative_operation (code, mode, op0, op1);
3279 if (tem)
3280 return tem;
3281 break;
3283 case UMAX:
3284 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3285 return op1;
3286 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3287 return op0;
3288 tem = simplify_associative_operation (code, mode, op0, op1);
3289 if (tem)
3290 return tem;
3291 break;
3293 case SS_PLUS:
3294 case US_PLUS:
3295 case SS_MINUS:
3296 case US_MINUS:
3297 case SS_MULT:
3298 case US_MULT:
3299 case SS_DIV:
3300 case US_DIV:
3301 /* ??? There are simplifications that can be done. */
3302 return 0;
3304 case VEC_SELECT:
3305 if (!VECTOR_MODE_P (mode))
3307 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3308 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3309 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3310 gcc_assert (XVECLEN (trueop1, 0) == 1);
3311 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3313 if (GET_CODE (trueop0) == CONST_VECTOR)
3314 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3315 (trueop1, 0, 0)));
3317 /* Extract a scalar element from a nested VEC_SELECT expression
3318 (with optional nested VEC_CONCAT expression). Some targets
3319 (i386) extract a scalar element from a vector using a chain of
3320 nested VEC_SELECT expressions. When the input operand is a memory
3321 operand, this operation can be simplified to a simple scalar
3322 load from an offset memory address. */
3323 if (GET_CODE (trueop0) == VEC_SELECT)
3325 rtx op0 = XEXP (trueop0, 0);
3326 rtx op1 = XEXP (trueop0, 1);
3328 machine_mode opmode = GET_MODE (op0);
3329 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3330 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3332 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3333 int elem;
3335 rtvec vec;
3336 rtx tmp_op, tmp;
3338 gcc_assert (GET_CODE (op1) == PARALLEL);
3339 gcc_assert (i < n_elts);
3341 /* Select the element pointed to by the nested selector. */
3342 elem = INTVAL (XVECEXP (op1, 0, i));
3344 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3345 if (GET_CODE (op0) == VEC_CONCAT)
3347 rtx op00 = XEXP (op0, 0);
3348 rtx op01 = XEXP (op0, 1);
3350 machine_mode mode00, mode01;
3351 int n_elts00, n_elts01;
3353 mode00 = GET_MODE (op00);
3354 mode01 = GET_MODE (op01);
3356 /* Find out the number of elements of each operand. */
3357 if (VECTOR_MODE_P (mode00))
3359 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3360 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3362 else
3363 n_elts00 = 1;
3365 if (VECTOR_MODE_P (mode01))
3367 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3368 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3370 else
3371 n_elts01 = 1;
3373 gcc_assert (n_elts == n_elts00 + n_elts01);
3375 /* Select the correct operand of the VEC_CONCAT
3376 and adjust the selector. */
3377 if (elem < n_elts01)
3378 tmp_op = op00;
3379 else
3381 tmp_op = op01;
3382 elem -= n_elts00;
3385 else
3386 tmp_op = op0;
3388 vec = rtvec_alloc (1);
3389 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3391 tmp = gen_rtx_fmt_ee (code, mode,
3392 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3393 return tmp;
3395 if (GET_CODE (trueop0) == VEC_DUPLICATE
3396 && GET_MODE (XEXP (trueop0, 0)) == mode)
3397 return XEXP (trueop0, 0);
3399 else
3401 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3402 gcc_assert (GET_MODE_INNER (mode)
3403 == GET_MODE_INNER (GET_MODE (trueop0)));
3404 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3406 if (GET_CODE (trueop0) == CONST_VECTOR)
3408 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3409 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3410 rtvec v = rtvec_alloc (n_elts);
3411 unsigned int i;
3413 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3414 for (i = 0; i < n_elts; i++)
3416 rtx x = XVECEXP (trueop1, 0, i);
3418 gcc_assert (CONST_INT_P (x));
3419 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3420 INTVAL (x));
3423 return gen_rtx_CONST_VECTOR (mode, v);
3426 /* Recognize the identity. */
3427 if (GET_MODE (trueop0) == mode)
3429 bool maybe_ident = true;
3430 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3432 rtx j = XVECEXP (trueop1, 0, i);
3433 if (!CONST_INT_P (j) || INTVAL (j) != i)
3435 maybe_ident = false;
3436 break;
3439 if (maybe_ident)
3440 return trueop0;
3443 /* If we build {a,b} then permute it, build the result directly. */
3444 if (XVECLEN (trueop1, 0) == 2
3445 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3446 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3447 && GET_CODE (trueop0) == VEC_CONCAT
3448 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3449 && GET_MODE (XEXP (trueop0, 0)) == mode
3450 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3451 && GET_MODE (XEXP (trueop0, 1)) == mode)
3453 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3454 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3455 rtx subop0, subop1;
3457 gcc_assert (i0 < 4 && i1 < 4);
3458 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3459 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3461 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3464 if (XVECLEN (trueop1, 0) == 2
3465 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3466 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3467 && GET_CODE (trueop0) == VEC_CONCAT
3468 && GET_MODE (trueop0) == mode)
3470 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3471 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3472 rtx subop0, subop1;
3474 gcc_assert (i0 < 2 && i1 < 2);
3475 subop0 = XEXP (trueop0, i0);
3476 subop1 = XEXP (trueop0, i1);
3478 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3481 /* If we select one half of a vec_concat, return that. */
3482 if (GET_CODE (trueop0) == VEC_CONCAT
3483 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3485 rtx subop0 = XEXP (trueop0, 0);
3486 rtx subop1 = XEXP (trueop0, 1);
3487 machine_mode mode0 = GET_MODE (subop0);
3488 machine_mode mode1 = GET_MODE (subop1);
3489 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3490 int l0 = GET_MODE_SIZE (mode0) / li;
3491 int l1 = GET_MODE_SIZE (mode1) / li;
3492 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3493 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3495 bool success = true;
3496 for (int i = 1; i < l0; ++i)
3498 rtx j = XVECEXP (trueop1, 0, i);
3499 if (!CONST_INT_P (j) || INTVAL (j) != i)
3501 success = false;
3502 break;
3505 if (success)
3506 return subop0;
3508 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3510 bool success = true;
3511 for (int i = 1; i < l1; ++i)
3513 rtx j = XVECEXP (trueop1, 0, i);
3514 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3516 success = false;
3517 break;
3520 if (success)
3521 return subop1;
3526 if (XVECLEN (trueop1, 0) == 1
3527 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3528 && GET_CODE (trueop0) == VEC_CONCAT)
3530 rtx vec = trueop0;
3531 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3533 /* Try to find the element in the VEC_CONCAT. */
3534 while (GET_MODE (vec) != mode
3535 && GET_CODE (vec) == VEC_CONCAT)
3537 HOST_WIDE_INT vec_size;
3539 if (CONST_INT_P (XEXP (vec, 0)))
3541 /* vec_concat of two const_ints doesn't make sense with
3542 respect to modes. */
3543 if (CONST_INT_P (XEXP (vec, 1)))
3544 return 0;
3546 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3547 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3549 else
3550 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3552 if (offset < vec_size)
3553 vec = XEXP (vec, 0);
3554 else
3556 offset -= vec_size;
3557 vec = XEXP (vec, 1);
3559 vec = avoid_constant_pool_reference (vec);
3562 if (GET_MODE (vec) == mode)
3563 return vec;
3566 /* If we select elements in a vec_merge that all come from the same
3567 operand, select from that operand directly. */
3568 if (GET_CODE (op0) == VEC_MERGE)
3570 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3571 if (CONST_INT_P (trueop02))
3573 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3574 bool all_operand0 = true;
3575 bool all_operand1 = true;
3576 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3578 rtx j = XVECEXP (trueop1, 0, i);
3579 if (sel & (1 << UINTVAL (j)))
3580 all_operand1 = false;
3581 else
3582 all_operand0 = false;
3584 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3585 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3586 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3587 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3591 /* If we have two nested selects that are inverses of each
3592 other, replace them with the source operand. */
3593 if (GET_CODE (trueop0) == VEC_SELECT
3594 && GET_MODE (XEXP (trueop0, 0)) == mode)
3596 rtx op0_subop1 = XEXP (trueop0, 1);
3597 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3598 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3600 /* Apply the outer ordering vector to the inner one. (The inner
3601 ordering vector is expressly permitted to be of a different
3602 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3603 then the two VEC_SELECTs cancel. */
3604 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3606 rtx x = XVECEXP (trueop1, 0, i);
3607 if (!CONST_INT_P (x))
3608 return 0;
3609 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3610 if (!CONST_INT_P (y) || i != INTVAL (y))
3611 return 0;
3613 return XEXP (trueop0, 0);
3616 return 0;
3617 case VEC_CONCAT:
3619 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3620 ? GET_MODE (trueop0)
3621 : GET_MODE_INNER (mode));
3622 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3623 ? GET_MODE (trueop1)
3624 : GET_MODE_INNER (mode));
3626 gcc_assert (VECTOR_MODE_P (mode));
3627 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3628 == GET_MODE_SIZE (mode));
3630 if (VECTOR_MODE_P (op0_mode))
3631 gcc_assert (GET_MODE_INNER (mode)
3632 == GET_MODE_INNER (op0_mode));
3633 else
3634 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3636 if (VECTOR_MODE_P (op1_mode))
3637 gcc_assert (GET_MODE_INNER (mode)
3638 == GET_MODE_INNER (op1_mode));
3639 else
3640 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3642 if ((GET_CODE (trueop0) == CONST_VECTOR
3643 || CONST_SCALAR_INT_P (trueop0)
3644 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3645 && (GET_CODE (trueop1) == CONST_VECTOR
3646 || CONST_SCALAR_INT_P (trueop1)
3647 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3649 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3650 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3651 rtvec v = rtvec_alloc (n_elts);
3652 unsigned int i;
3653 unsigned in_n_elts = 1;
3655 if (VECTOR_MODE_P (op0_mode))
3656 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3657 for (i = 0; i < n_elts; i++)
3659 if (i < in_n_elts)
3661 if (!VECTOR_MODE_P (op0_mode))
3662 RTVEC_ELT (v, i) = trueop0;
3663 else
3664 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3666 else
3668 if (!VECTOR_MODE_P (op1_mode))
3669 RTVEC_ELT (v, i) = trueop1;
3670 else
3671 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3672 i - in_n_elts);
3676 return gen_rtx_CONST_VECTOR (mode, v);
3679 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3680 Restrict the transformation to avoid generating a VEC_SELECT with a
3681 mode unrelated to its operand. */
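/* Illustrative case: (vec_concat (vec_select x [0 2]) (vec_select x [1 3]))
   becomes (vec_select x [0 2 1 3]), assuming x already has the result mode
   so the merged VEC_SELECT stays mode-consistent.  */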
3682 if (GET_CODE (trueop0) == VEC_SELECT
3683 && GET_CODE (trueop1) == VEC_SELECT
3684 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3685 && GET_MODE (XEXP (trueop0, 0)) == mode)
3687 rtx par0 = XEXP (trueop0, 1);
3688 rtx par1 = XEXP (trueop1, 1);
3689 int len0 = XVECLEN (par0, 0);
3690 int len1 = XVECLEN (par1, 0);
3691 rtvec vec = rtvec_alloc (len0 + len1);
3692 for (int i = 0; i < len0; i++)
3693 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3694 for (int i = 0; i < len1; i++)
3695 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3696 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3697 gen_rtx_PARALLEL (VOIDmode, vec));
3700 return 0;
3702 default:
3703 gcc_unreachable ();
3706 return 0;
3710 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3711 rtx op0, rtx op1)
3713 unsigned int width = GET_MODE_PRECISION (mode);
3715 if (VECTOR_MODE_P (mode)
3716 && code != VEC_CONCAT
3717 && GET_CODE (op0) == CONST_VECTOR
3718 && GET_CODE (op1) == CONST_VECTOR)
3720 unsigned n_elts = GET_MODE_NUNITS (mode);
3721 machine_mode op0mode = GET_MODE (op0);
3722 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3723 machine_mode op1mode = GET_MODE (op1);
3724 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3725 rtvec v = rtvec_alloc (n_elts);
3726 unsigned int i;
3728 gcc_assert (op0_n_elts == n_elts);
3729 gcc_assert (op1_n_elts == n_elts);
3730 for (i = 0; i < n_elts; i++)
3732 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3733 CONST_VECTOR_ELT (op0, i),
3734 CONST_VECTOR_ELT (op1, i));
3735 if (!x)
3736 return 0;
3737 RTVEC_ELT (v, i) = x;
3740 return gen_rtx_CONST_VECTOR (mode, v);
3743 if (VECTOR_MODE_P (mode)
3744 && code == VEC_CONCAT
3745 && (CONST_SCALAR_INT_P (op0)
3746 || GET_CODE (op0) == CONST_FIXED
3747 || CONST_DOUBLE_AS_FLOAT_P (op0))
3748 && (CONST_SCALAR_INT_P (op1)
3749 || CONST_DOUBLE_AS_FLOAT_P (op1)
3750 || GET_CODE (op1) == CONST_FIXED))
3752 unsigned n_elts = GET_MODE_NUNITS (mode);
3753 rtvec v = rtvec_alloc (n_elts);
3755 gcc_assert (n_elts >= 2);
3756 if (n_elts == 2)
3758 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3759 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3761 RTVEC_ELT (v, 0) = op0;
3762 RTVEC_ELT (v, 1) = op1;
3764 else
3766 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3767 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3768 unsigned i;
3770 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3771 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3772 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3774 for (i = 0; i < op0_n_elts; ++i)
3775 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3776 for (i = 0; i < op1_n_elts; ++i)
3777 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3780 return gen_rtx_CONST_VECTOR (mode, v);
3783 if (SCALAR_FLOAT_MODE_P (mode)
3784 && CONST_DOUBLE_AS_FLOAT_P (op0)
3785 && CONST_DOUBLE_AS_FLOAT_P (op1)
3786 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3788 if (code == AND
3789 || code == IOR
3790 || code == XOR)
3792 long tmp0[4];
3793 long tmp1[4];
3794 REAL_VALUE_TYPE r;
3795 int i;
3797 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3798 GET_MODE (op0));
3799 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3800 GET_MODE (op1));
3801 for (i = 0; i < 4; i++)
3803 switch (code)
3805 case AND:
3806 tmp0[i] &= tmp1[i];
3807 break;
3808 case IOR:
3809 tmp0[i] |= tmp1[i];
3810 break;
3811 case XOR:
3812 tmp0[i] ^= tmp1[i];
3813 break;
3814 default:
3815 gcc_unreachable ();
3818 real_from_target (&r, tmp0, mode);
3819 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3821 else
3823 REAL_VALUE_TYPE f0, f1, value, result;
3824 bool inexact;
3826 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3827 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3828 real_convert (&f0, mode, &f0);
3829 real_convert (&f1, mode, &f1);
3831 if (HONOR_SNANS (mode)
3832 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3833 return 0;
3835 if (code == DIV
3836 && REAL_VALUES_EQUAL (f1, dconst0)
3837 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3838 return 0;
3840 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3841 && flag_trapping_math
3842 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3844 int s0 = REAL_VALUE_NEGATIVE (f0);
3845 int s1 = REAL_VALUE_NEGATIVE (f1);
3847 switch (code)
3849 case PLUS:
3850 /* Inf + -Inf = NaN plus exception. */
3851 if (s0 != s1)
3852 return 0;
3853 break;
3854 case MINUS:
3855 /* Inf - Inf = NaN plus exception. */
3856 if (s0 == s1)
3857 return 0;
3858 break;
3859 case DIV:
3860 /* Inf / Inf = NaN plus exception. */
3861 return 0;
3862 default:
3863 break;
3867 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3868 && flag_trapping_math
3869 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3870 || (REAL_VALUE_ISINF (f1)
3871 && REAL_VALUES_EQUAL (f0, dconst0))))
3872 /* Inf * 0 = NaN plus exception. */
3873 return 0;
3875 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3876 &f0, &f1);
3877 real_convert (&result, mode, &value);
3879 /* Don't constant fold this floating point operation if
3880 the result has overflowed and flag_trapping_math is set. */
3882 if (flag_trapping_math
3883 && MODE_HAS_INFINITIES (mode)
3884 && REAL_VALUE_ISINF (result)
3885 && !REAL_VALUE_ISINF (f0)
3886 && !REAL_VALUE_ISINF (f1))
3887 /* Overflow plus exception. */
3888 return 0;
3890 /* Don't constant fold this floating point operation if the
3891 result may depend upon the run-time rounding mode and
3892 flag_rounding_math is set, or if GCC's software emulation
3893 is unable to accurately represent the result. */
3895 if ((flag_rounding_math
3896 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3897 && (inexact || !real_identical (&result, &value)))
3898 return NULL_RTX;
3900 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3904 /* We can fold some multi-word operations. */
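/* For instance (illustrative), adding two TImode CONST_WIDE_INT operands is
   handled below by doing the arithmetic in wide_int (wi::add) and wrapping
   the result with immed_wide_int_const, with no target-specific code.  */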
3905 if ((GET_MODE_CLASS (mode) == MODE_INT
3906 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3907 && CONST_SCALAR_INT_P (op0)
3908 && CONST_SCALAR_INT_P (op1))
3910 wide_int result;
3911 bool overflow;
3912 rtx_mode_t pop0 = std::make_pair (op0, mode);
3913 rtx_mode_t pop1 = std::make_pair (op1, mode);
3915 #if TARGET_SUPPORTS_WIDE_INT == 0
3916 /* This assert keeps the simplification from producing a result
3917 that cannot be represented in a CONST_DOUBLE, but a lot of
3918 upstream callers expect that this function never fails to
3919 simplify something, so if you added this to the test
3920 above, the code would die later anyway. If this assert
3921 triggers, you just need to make the port support wide int. */
3922 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3923 #endif
3924 switch (code)
3926 case MINUS:
3927 result = wi::sub (pop0, pop1);
3928 break;
3930 case PLUS:
3931 result = wi::add (pop0, pop1);
3932 break;
3934 case MULT:
3935 result = wi::mul (pop0, pop1);
3936 break;
3938 case DIV:
3939 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3940 if (overflow)
3941 return NULL_RTX;
3942 break;
3944 case MOD:
3945 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3946 if (overflow)
3947 return NULL_RTX;
3948 break;
3950 case UDIV:
3951 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3952 if (overflow)
3953 return NULL_RTX;
3954 break;
3956 case UMOD:
3957 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3958 if (overflow)
3959 return NULL_RTX;
3960 break;
3962 case AND:
3963 result = wi::bit_and (pop0, pop1);
3964 break;
3966 case IOR:
3967 result = wi::bit_or (pop0, pop1);
3968 break;
3970 case XOR:
3971 result = wi::bit_xor (pop0, pop1);
3972 break;
3974 case SMIN:
3975 result = wi::smin (pop0, pop1);
3976 break;
3978 case SMAX:
3979 result = wi::smax (pop0, pop1);
3980 break;
3982 case UMIN:
3983 result = wi::umin (pop0, pop1);
3984 break;
3986 case UMAX:
3987 result = wi::umax (pop0, pop1);
3988 break;
3990 case LSHIFTRT:
3991 case ASHIFTRT:
3992 case ASHIFT:
3994 wide_int wop1 = pop1;
3995 if (SHIFT_COUNT_TRUNCATED)
3996 wop1 = wi::umod_trunc (wop1, width);
3997 else if (wi::geu_p (wop1, width))
3998 return NULL_RTX;
4000 switch (code)
4002 case LSHIFTRT:
4003 result = wi::lrshift (pop0, wop1);
4004 break;
4006 case ASHIFTRT:
4007 result = wi::arshift (pop0, wop1);
4008 break;
4010 case ASHIFT:
4011 result = wi::lshift (pop0, wop1);
4012 break;
4014 default:
4015 gcc_unreachable ();
4017 break;
4019 case ROTATE:
4020 case ROTATERT:
4022 if (wi::neg_p (pop1))
4023 return NULL_RTX;
4025 switch (code)
4027 case ROTATE:
4028 result = wi::lrotate (pop0, pop1);
4029 break;
4031 case ROTATERT:
4032 result = wi::rrotate (pop0, pop1);
4033 break;
4035 default:
4036 gcc_unreachable ();
4038 break;
4040 default:
4041 return NULL_RTX;
4043 return immed_wide_int_const (result, mode);
4046 return NULL_RTX;
4051 /* Return a positive integer if X should sort after Y. The value
4052 returned is 1 if and only if X and Y are both regs. */
4054 static int
4055 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4057 int result;
4059 result = (commutative_operand_precedence (y)
4060 - commutative_operand_precedence (x));
4061 if (result)
4062 return result + result;
4064 /* Group together equal REGs to do more simplification. */
4065 if (REG_P (x) && REG_P (y))
4066 return REGNO (x) > REGNO (y);
4068 return 0;
4071 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4072 operands may be another PLUS or MINUS.
4074 Rather than test for specific cases, we do this by a brute-force method
4075 and do all possible simplifications until no more changes occur. Then
4076 we rebuild the operation.
4078 May return NULL_RTX when no changes were made. */
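/* Illustrative sketch of the expansion: for
   (minus (plus (reg A) (const_int 4)) (plus (reg A) (const_int 1)))
   the terms are flattened into the ops[] array as { +A, -A, +4, -1 };
   the A terms cancel in the pairwise simplification loop and the
   constants fold, leaving (const_int 3).  */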
4080 static rtx
4081 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4082 rtx op1)
4084 struct simplify_plus_minus_op_data
4086 rtx op;
4087 short neg;
4088 } ops[16];
4089 rtx result, tem;
4090 int n_ops = 2;
4091 int changed, n_constants, canonicalized = 0;
4092 int i, j;
4094 memset (ops, 0, sizeof ops);
4096 /* Set up the two operands and then expand them until nothing has been
4097 changed. If we run out of room in our array, give up; this should
4098 almost never happen. */
4100 ops[0].op = op0;
4101 ops[0].neg = 0;
4102 ops[1].op = op1;
4103 ops[1].neg = (code == MINUS);
4107 changed = 0;
4108 n_constants = 0;
4110 for (i = 0; i < n_ops; i++)
4112 rtx this_op = ops[i].op;
4113 int this_neg = ops[i].neg;
4114 enum rtx_code this_code = GET_CODE (this_op);
4116 switch (this_code)
4118 case PLUS:
4119 case MINUS:
4120 if (n_ops == ARRAY_SIZE (ops))
4121 return NULL_RTX;
4123 ops[n_ops].op = XEXP (this_op, 1);
4124 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4125 n_ops++;
4127 ops[i].op = XEXP (this_op, 0);
4128 changed = 1;
4129 /* If this operand was negated then we will potentially
4130 canonicalize the expression. Similarly, if we don't
4131 place the operands adjacent, we're re-ordering the
4132 expression and thus might be performing a
4133 canonicalization. Ignore register re-ordering.
4134 ??? It might be better to shuffle the ops array here,
4135 but then (plus (plus (A, B), plus (C, D))) wouldn't
4136 be seen as non-canonical. */
4137 if (this_neg
4138 || (i != n_ops - 2
4139 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4140 canonicalized = 1;
4141 break;
4143 case NEG:
4144 ops[i].op = XEXP (this_op, 0);
4145 ops[i].neg = ! this_neg;
4146 changed = 1;
4147 canonicalized = 1;
4148 break;
4150 case CONST:
4151 if (n_ops != ARRAY_SIZE (ops)
4152 && GET_CODE (XEXP (this_op, 0)) == PLUS
4153 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4154 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4156 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4157 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4158 ops[n_ops].neg = this_neg;
4159 n_ops++;
4160 changed = 1;
4161 canonicalized = 1;
4163 break;
4165 case NOT:
4166 /* ~a -> (-a - 1) */
4167 if (n_ops != ARRAY_SIZE (ops))
4169 ops[n_ops].op = CONSTM1_RTX (mode);
4170 ops[n_ops++].neg = this_neg;
4171 ops[i].op = XEXP (this_op, 0);
4172 ops[i].neg = !this_neg;
4173 changed = 1;
4174 canonicalized = 1;
4176 break;
4178 case CONST_INT:
4179 n_constants++;
4180 if (this_neg)
4182 ops[i].op = neg_const_int (mode, this_op);
4183 ops[i].neg = 0;
4184 changed = 1;
4185 canonicalized = 1;
4187 break;
4189 default:
4190 break;
4194 while (changed);
4196 if (n_constants > 1)
4197 canonicalized = 1;
4199 gcc_assert (n_ops >= 2);
4201 /* If we only have two operands, we can avoid the loops. */
4202 if (n_ops == 2)
4204 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4205 rtx lhs, rhs;
4207 /* Get the two operands. Be careful with the order, especially for
4208 the cases where code == MINUS. */
4209 if (ops[0].neg && ops[1].neg)
4211 lhs = gen_rtx_NEG (mode, ops[0].op);
4212 rhs = ops[1].op;
4214 else if (ops[0].neg)
4216 lhs = ops[1].op;
4217 rhs = ops[0].op;
4219 else
4221 lhs = ops[0].op;
4222 rhs = ops[1].op;
4225 return simplify_const_binary_operation (code, mode, lhs, rhs);
4228 /* Now simplify each pair of operands until nothing changes. */
4229 while (1)
4231 /* Insertion sort is good enough for a small array. */
4232 for (i = 1; i < n_ops; i++)
4234 struct simplify_plus_minus_op_data save;
4235 int cmp;
4237 j = i - 1;
4238 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4239 if (cmp <= 0)
4240 continue;
4241 /* Just swapping registers doesn't count as canonicalization. */
4242 if (cmp != 1)
4243 canonicalized = 1;
4245 save = ops[i];
4247 ops[j + 1] = ops[j];
4248 while (j--
4249 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4250 ops[j + 1] = save;
4253 changed = 0;
4254 for (i = n_ops - 1; i > 0; i--)
4255 for (j = i - 1; j >= 0; j--)
4257 rtx lhs = ops[j].op, rhs = ops[i].op;
4258 int lneg = ops[j].neg, rneg = ops[i].neg;
4260 if (lhs != 0 && rhs != 0)
4262 enum rtx_code ncode = PLUS;
4264 if (lneg != rneg)
4266 ncode = MINUS;
4267 if (lneg)
4268 std::swap (lhs, rhs);
4270 else if (swap_commutative_operands_p (lhs, rhs))
4271 std::swap (lhs, rhs);
4273 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4274 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4276 rtx tem_lhs, tem_rhs;
4278 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4279 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4280 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4281 tem_rhs);
4283 if (tem && !CONSTANT_P (tem))
4284 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4286 else
4287 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4289 if (tem)
4291 /* Reject "simplifications" that just wrap the two
4292 arguments in a CONST. Failure to do so can result
4293 in infinite recursion with simplify_binary_operation
4294 when it calls us to simplify CONST operations.
4295 Also, if we find such a simplification, don't try
4296 any more combinations with this rhs: We must have
4297 something like symbol+offset, i.e. one of the
4298 trivial CONST expressions we handle later. */
4299 if (GET_CODE (tem) == CONST
4300 && GET_CODE (XEXP (tem, 0)) == ncode
4301 && XEXP (XEXP (tem, 0), 0) == lhs
4302 && XEXP (XEXP (tem, 0), 1) == rhs)
4303 break;
4304 lneg &= rneg;
4305 if (GET_CODE (tem) == NEG)
4306 tem = XEXP (tem, 0), lneg = !lneg;
4307 if (CONST_INT_P (tem) && lneg)
4308 tem = neg_const_int (mode, tem), lneg = 0;
4310 ops[i].op = tem;
4311 ops[i].neg = lneg;
4312 ops[j].op = NULL_RTX;
4313 changed = 1;
4314 canonicalized = 1;
4319 if (!changed)
4320 break;
4322 /* Pack all the operands to the lower-numbered entries. */
4323 for (i = 0, j = 0; j < n_ops; j++)
4324 if (ops[j].op)
4326 ops[i] = ops[j];
4327 i++;
4329 n_ops = i;
4332 /* If nothing changed, fail. */
4333 if (!canonicalized)
4334 return NULL_RTX;
4336 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4337 if (n_ops == 2
4338 && CONST_INT_P (ops[1].op)
4339 && CONSTANT_P (ops[0].op)
4340 && ops[0].neg)
4341 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4343 /* We suppressed creation of trivial CONST expressions in the
4344 combination loop to avoid recursion. Create one manually now.
4345 The combination loop should have ensured that there is exactly
4346 one CONST_INT, and the sort will have ensured that it is last
4347 in the array and that any other constant will be next-to-last. */
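/* Illustrative case: if the array ends with
   { ..., (symbol_ref X), (const_int 8) }, the two entries are folded here
   into (const (plus (symbol_ref X) (const_int 8))) by plus_constant.  */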
4349 if (n_ops > 1
4350 && CONST_INT_P (ops[n_ops - 1].op)
4351 && CONSTANT_P (ops[n_ops - 2].op))
4353 rtx value = ops[n_ops - 1].op;
4354 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4355 value = neg_const_int (mode, value);
4356 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4357 INTVAL (value));
4358 n_ops--;
4361 /* Put a non-negated operand first, if possible. */
4363 for (i = 0; i < n_ops && ops[i].neg; i++)
4364 continue;
4365 if (i == n_ops)
4366 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4367 else if (i != 0)
4369 tem = ops[0].op;
4370 ops[0] = ops[i];
4371 ops[i].op = tem;
4372 ops[i].neg = 1;
4375 /* Now make the result by performing the requested operations. */
4376 result = ops[0].op;
4377 for (i = 1; i < n_ops; i++)
4378 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4379 mode, result, ops[i].op);
4381 return result;
4384 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4385 static bool
4386 plus_minus_operand_p (const_rtx x)
4388 return GET_CODE (x) == PLUS
4389 || GET_CODE (x) == MINUS
4390 || (GET_CODE (x) == CONST
4391 && GET_CODE (XEXP (x, 0)) == PLUS
4392 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4393 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4396 /* Like simplify_binary_operation except used for relational operators.
4397 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4398 not both be VOIDmode as well.
4400 CMP_MODE specifies the mode in which the comparison is done, so it is
4401 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4402 the operands or, if both are VOIDmode, the operands are compared in
4403 "infinite precision". */
4405 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4406 machine_mode cmp_mode, rtx op0, rtx op1)
4408 rtx tem, trueop0, trueop1;
4410 if (cmp_mode == VOIDmode)
4411 cmp_mode = GET_MODE (op0);
4412 if (cmp_mode == VOIDmode)
4413 cmp_mode = GET_MODE (op1);
4415 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4416 if (tem)
4418 if (SCALAR_FLOAT_MODE_P (mode))
4420 if (tem == const0_rtx)
4421 return CONST0_RTX (mode);
4422 #ifdef FLOAT_STORE_FLAG_VALUE
4424 REAL_VALUE_TYPE val;
4425 val = FLOAT_STORE_FLAG_VALUE (mode);
4426 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4428 #else
4429 return NULL_RTX;
4430 #endif
4432 if (VECTOR_MODE_P (mode))
4434 if (tem == const0_rtx)
4435 return CONST0_RTX (mode);
4436 #ifdef VECTOR_STORE_FLAG_VALUE
4438 int i, units;
4439 rtvec v;
4441 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4442 if (val == NULL_RTX)
4443 return NULL_RTX;
4444 if (val == const1_rtx)
4445 return CONST1_RTX (mode);
4447 units = GET_MODE_NUNITS (mode);
4448 v = rtvec_alloc (units);
4449 for (i = 0; i < units; i++)
4450 RTVEC_ELT (v, i) = val;
4451 return gen_rtx_raw_CONST_VECTOR (mode, v);
4453 #else
4454 return NULL_RTX;
4455 #endif
4458 return tem;
4461 /* For the following tests, ensure const0_rtx is op1. */
4462 if (swap_commutative_operands_p (op0, op1)
4463 || (op0 == const0_rtx && op1 != const0_rtx))
4464 std::swap (op0, op1), code = swap_condition (code);
4466 /* If op0 is a compare, extract the comparison arguments from it. */
4467 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4468 return simplify_gen_relational (code, mode, VOIDmode,
4469 XEXP (op0, 0), XEXP (op0, 1));
4471 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4472 || CC0_P (op0))
4473 return NULL_RTX;
4475 trueop0 = avoid_constant_pool_reference (op0);
4476 trueop1 = avoid_constant_pool_reference (op1);
4477 return simplify_relational_operation_1 (code, mode, cmp_mode,
4478 trueop0, trueop1);
4481 /* This part of simplify_relational_operation is only used when CMP_MODE
4482 is not in class MODE_CC (i.e. it is a real comparison).
4484 MODE is the mode of the result, while CMP_MODE specifies the mode in
4485 which the comparison is done, so it is the mode of the operands. */
4487 static rtx
4488 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4489 machine_mode cmp_mode, rtx op0, rtx op1)
4491 enum rtx_code op0code = GET_CODE (op0);
4493 if (op1 == const0_rtx && COMPARISON_P (op0))
4495 /* If op0 is a comparison, extract the comparison arguments
4496 from it. */
4497 if (code == NE)
4499 if (GET_MODE (op0) == mode)
4500 return simplify_rtx (op0);
4501 else
4502 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4503 XEXP (op0, 0), XEXP (op0, 1));
4505 else if (code == EQ)
4507 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4508 if (new_code != UNKNOWN)
4509 return simplify_gen_relational (new_code, mode, VOIDmode,
4510 XEXP (op0, 0), XEXP (op0, 1));
4514 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4515 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4516 if ((code == LTU || code == GEU)
4517 && GET_CODE (op0) == PLUS
4518 && CONST_INT_P (XEXP (op0, 1))
4519 && (rtx_equal_p (op1, XEXP (op0, 0))
4520 || rtx_equal_p (op1, XEXP (op0, 1)))
4521 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4522 && XEXP (op0, 1) != const0_rtx)
4524 rtx new_cmp
4525 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4526 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4527 cmp_mode, XEXP (op0, 0), new_cmp);
4530 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4531 if ((code == LTU || code == GEU)
4532 && GET_CODE (op0) == PLUS
4533 && rtx_equal_p (op1, XEXP (op0, 1))
4534 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4535 && !rtx_equal_p (op1, XEXP (op0, 0)))
4536 return simplify_gen_relational (code, mode, cmp_mode, op0,
4537 copy_rtx (XEXP (op0, 0)));
4539 if (op1 == const0_rtx)
4541 /* Canonicalize (GTU x 0) as (NE x 0). */
4542 if (code == GTU)
4543 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4544 /* Canonicalize (LEU x 0) as (EQ x 0). */
4545 if (code == LEU)
4546 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4548 else if (op1 == const1_rtx)
4550 switch (code)
4552 case GE:
4553 /* Canonicalize (GE x 1) as (GT x 0). */
4554 return simplify_gen_relational (GT, mode, cmp_mode,
4555 op0, const0_rtx);
4556 case GEU:
4557 /* Canonicalize (GEU x 1) as (NE x 0). */
4558 return simplify_gen_relational (NE, mode, cmp_mode,
4559 op0, const0_rtx);
4560 case LT:
4561 /* Canonicalize (LT x 1) as (LE x 0). */
4562 return simplify_gen_relational (LE, mode, cmp_mode,
4563 op0, const0_rtx);
4564 case LTU:
4565 /* Canonicalize (LTU x 1) as (EQ x 0). */
4566 return simplify_gen_relational (EQ, mode, cmp_mode,
4567 op0, const0_rtx);
4568 default:
4569 break;
4572 else if (op1 == constm1_rtx)
4574 /* Canonicalize (LE x -1) as (LT x 0). */
4575 if (code == LE)
4576 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4577 /* Canonicalize (GT x -1) as (GE x 0). */
4578 if (code == GT)
4579 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4582 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4583 if ((code == EQ || code == NE)
4584 && (op0code == PLUS || op0code == MINUS)
4585 && CONSTANT_P (op1)
4586 && CONSTANT_P (XEXP (op0, 1))
4587 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4589 rtx x = XEXP (op0, 0);
4590 rtx c = XEXP (op0, 1);
4591 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4592 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4594 /* Detect an infinite recursive condition, where we oscillate at this
4595 simplification case between:
4596 A + B == C <---> C - B == A,
4597 where A, B, and C are all constants with non-simplifiable expressions,
4598 usually SYMBOL_REFs. */
4599 if (GET_CODE (tem) == invcode
4600 && CONSTANT_P (x)
4601 && rtx_equal_p (c, XEXP (tem, 1)))
4602 return NULL_RTX;
4604 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4607 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4608 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4609 if (code == NE
4610 && op1 == const0_rtx
4611 && GET_MODE_CLASS (mode) == MODE_INT
4612 && cmp_mode != VOIDmode
4613 /* ??? Work-around BImode bugs in the ia64 backend. */
4614 && mode != BImode
4615 && cmp_mode != BImode
4616 && nonzero_bits (op0, cmp_mode) == 1
4617 && STORE_FLAG_VALUE == 1)
4618 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4619 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4620 : lowpart_subreg (mode, op0, cmp_mode);
4622 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4623 if ((code == EQ || code == NE)
4624 && op1 == const0_rtx
4625 && op0code == XOR)
4626 return simplify_gen_relational (code, mode, cmp_mode,
4627 XEXP (op0, 0), XEXP (op0, 1));
4629 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4630 if ((code == EQ || code == NE)
4631 && op0code == XOR
4632 && rtx_equal_p (XEXP (op0, 0), op1)
4633 && !side_effects_p (XEXP (op0, 0)))
4634 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4635 CONST0_RTX (mode));
4637 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4638 if ((code == EQ || code == NE)
4639 && op0code == XOR
4640 && rtx_equal_p (XEXP (op0, 1), op1)
4641 && !side_effects_p (XEXP (op0, 1)))
4642 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4643 CONST0_RTX (mode));
4645 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4646 if ((code == EQ || code == NE)
4647 && op0code == XOR
4648 && CONST_SCALAR_INT_P (op1)
4649 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4650 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4651 simplify_gen_binary (XOR, cmp_mode,
4652 XEXP (op0, 1), op1));
4654 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4655 can be implemented with a BICS instruction on some targets, or
4656 constant-folded if y is a constant. */
4657 if ((code == EQ || code == NE)
4658 && op0code == AND
4659 && rtx_equal_p (XEXP (op0, 0), op1)
4660 && !side_effects_p (op1)
4661 && op1 != CONST0_RTX (cmp_mode))
4663 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4664 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4666 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4667 CONST0_RTX (cmp_mode));
4670 /* Likewise for (eq/ne (and x y) y). */
4671 if ((code == EQ || code == NE)
4672 && op0code == AND
4673 && rtx_equal_p (XEXP (op0, 1), op1)
4674 && !side_effects_p (op1)
4675 && op1 != CONST0_RTX (cmp_mode))
4677 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4678 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4680 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4681 CONST0_RTX (cmp_mode));
4684 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4685 if ((code == EQ || code == NE)
4686 && GET_CODE (op0) == BSWAP
4687 && CONST_SCALAR_INT_P (op1))
4688 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4689 simplify_gen_unary (BSWAP, cmp_mode,
4690 op1, cmp_mode));
4692 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4693 if ((code == EQ || code == NE)
4694 && GET_CODE (op0) == BSWAP
4695 && GET_CODE (op1) == BSWAP)
4696 return simplify_gen_relational (code, mode, cmp_mode,
4697 XEXP (op0, 0), XEXP (op1, 0));
4699 if (op0code == POPCOUNT && op1 == const0_rtx)
4700 switch (code)
4702 case EQ:
4703 case LE:
4704 case LEU:
4705 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4706 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4707 XEXP (op0, 0), const0_rtx);
4709 case NE:
4710 case GT:
4711 case GTU:
4712 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4713 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4714 XEXP (op0, 0), const0_rtx);
4716 default:
4717 break;
4720 return NULL_RTX;
4723 enum
4725 CMP_EQ = 1,
4726 CMP_LT = 2,
4727 CMP_GT = 4,
4728 CMP_LTU = 8,
4729 CMP_GTU = 16
4733 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4734 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4735 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4736 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4737 For floating-point comparisons, assume that the operands were ordered. */
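/* Illustrative case: comparing -1 with 1 gives CMP_LT as a signed
   comparison but CMP_GTU as an unsigned one, so KNOWN_RESULT would be
   CMP_LT | CMP_GTU; LT then maps to const_true_rtx while LTU maps to
   const0_rtx.  */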
4739 static rtx
4740 comparison_result (enum rtx_code code, int known_results)
4742 switch (code)
4744 case EQ:
4745 case UNEQ:
4746 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4747 case NE:
4748 case LTGT:
4749 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4751 case LT:
4752 case UNLT:
4753 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4754 case GE:
4755 case UNGE:
4756 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4758 case GT:
4759 case UNGT:
4760 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4761 case LE:
4762 case UNLE:
4763 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4765 case LTU:
4766 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4767 case GEU:
4768 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4770 case GTU:
4771 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4772 case LEU:
4773 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4775 case ORDERED:
4776 return const_true_rtx;
4777 case UNORDERED:
4778 return const0_rtx;
4779 default:
4780 gcc_unreachable ();
4784 /* Check if the given comparison (done in the given MODE) is actually
4785 a tautology or a contradiction. If the mode is VOIDmode, the
4786 comparison is done in "infinite precision". If no simplification
4787 is possible, this function returns zero. Otherwise, it returns
4788 either const_true_rtx or const0_rtx. */
4791 simplify_const_relational_operation (enum rtx_code code,
4792 machine_mode mode,
4793 rtx op0, rtx op1)
4795 rtx tem;
4796 rtx trueop0;
4797 rtx trueop1;
4799 gcc_assert (mode != VOIDmode
4800 || (GET_MODE (op0) == VOIDmode
4801 && GET_MODE (op1) == VOIDmode));
4803 /* If op0 is a compare, extract the comparison arguments from it. */
4804 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4806 op1 = XEXP (op0, 1);
4807 op0 = XEXP (op0, 0);
4809 if (GET_MODE (op0) != VOIDmode)
4810 mode = GET_MODE (op0);
4811 else if (GET_MODE (op1) != VOIDmode)
4812 mode = GET_MODE (op1);
4813 else
4814 return 0;
4817 /* We can't simplify MODE_CC values since we don't know what the
4818 actual comparison is. */
4819 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4820 return 0;
4822 /* Make sure the constant is second. */
4823 if (swap_commutative_operands_p (op0, op1))
4825 std::swap (op0, op1);
4826 code = swap_condition (code);
4829 trueop0 = avoid_constant_pool_reference (op0);
4830 trueop1 = avoid_constant_pool_reference (op1);
4832 /* For integer comparisons of A and B maybe we can simplify A - B and can
4833 then simplify a comparison of that with zero. If A and B are both either
4834 a register or a CONST_INT, this can't help; testing for these cases will
4835 prevent infinite recursion here and speed things up.
4837 We can only do this for EQ and NE comparisons; otherwise we may
4838 lose or introduce overflow, which we cannot disregard as undefined
4839 because we do not know the signedness of the operation on either the
4840 left or the right hand side of the comparison. */
4842 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4843 && (code == EQ || code == NE)
4844 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4845 && (REG_P (op1) || CONST_INT_P (trueop1)))
4846 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4847 /* We cannot do this if tem is a nonzero address. */
4848 && ! nonzero_address_p (tem))
4849 return simplify_const_relational_operation (signed_condition (code),
4850 mode, tem, const0_rtx);
4852 if (! HONOR_NANS (mode) && code == ORDERED)
4853 return const_true_rtx;
4855 if (! HONOR_NANS (mode) && code == UNORDERED)
4856 return const0_rtx;
4858 /* For modes without NaNs, if the two operands are equal, we know the
4859 result except if they have side-effects. Even with NaNs we know
4860 the result of unordered comparisons and, if signaling NaNs are
4861 irrelevant, also the result of LT/GT/LTGT. */
4862 if ((! HONOR_NANS (trueop0)
4863 || code == UNEQ || code == UNLE || code == UNGE
4864 || ((code == LT || code == GT || code == LTGT)
4865 && ! HONOR_SNANS (trueop0)))
4866 && rtx_equal_p (trueop0, trueop1)
4867 && ! side_effects_p (trueop0))
4868 return comparison_result (code, CMP_EQ);
4870 /* If the operands are floating-point constants, see if we can fold
4871 the result. */
4872 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4873 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4874 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4876 REAL_VALUE_TYPE d0, d1;
4878 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4879 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4881 /* Comparisons are unordered iff at least one of the values is NaN. */
4882 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4883 switch (code)
4885 case UNEQ:
4886 case UNLT:
4887 case UNGT:
4888 case UNLE:
4889 case UNGE:
4890 case NE:
4891 case UNORDERED:
4892 return const_true_rtx;
4893 case EQ:
4894 case LT:
4895 case GT:
4896 case LE:
4897 case GE:
4898 case LTGT:
4899 case ORDERED:
4900 return const0_rtx;
4901 default:
4902 return 0;
4905 return comparison_result (code,
4906 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4907 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4910 /* Otherwise, see if the operands are both integers. */
4911 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4912 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4914 /* It would be nice if we really had a mode here. However, the
4915 largest int representable on the target is as good as
4916 infinite. */
4917 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4918 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4919 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4921 if (wi::eq_p (ptrueop0, ptrueop1))
4922 return comparison_result (code, CMP_EQ);
4923 else
4925 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4926 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4927 return comparison_result (code, cr);
4931 /* Optimize comparisons with upper and lower bounds. */
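/* Illustrative case: if nonzero_bits shows that only the low 8 bits of
   trueop0 can be set, mmin/mmax become 0/255, so (gtu x (const_int 255))
   folds to const0_rtx and (leu x (const_int 255)) folds to
   const_true_rtx.  */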
4932 if (HWI_COMPUTABLE_MODE_P (mode)
4933 && CONST_INT_P (trueop1))
4935 int sign;
4936 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4937 HOST_WIDE_INT val = INTVAL (trueop1);
4938 HOST_WIDE_INT mmin, mmax;
4940 if (code == GEU
4941 || code == LEU
4942 || code == GTU
4943 || code == LTU)
4944 sign = 0;
4945 else
4946 sign = 1;
4948 /* Get a reduced range if the sign bit is zero. */
4949 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4951 mmin = 0;
4952 mmax = nonzero;
4954 else
4956 rtx mmin_rtx, mmax_rtx;
4957 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4959 mmin = INTVAL (mmin_rtx);
4960 mmax = INTVAL (mmax_rtx);
4961 if (sign)
4963 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4965 mmin >>= (sign_copies - 1);
4966 mmax >>= (sign_copies - 1);
4970 switch (code)
4972 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4973 case GEU:
4974 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4975 return const_true_rtx;
4976 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4977 return const0_rtx;
4978 break;
4979 case GE:
4980 if (val <= mmin)
4981 return const_true_rtx;
4982 if (val > mmax)
4983 return const0_rtx;
4984 break;
4986 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4987 case LEU:
4988 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4989 return const_true_rtx;
4990 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4991 return const0_rtx;
4992 break;
4993 case LE:
4994 if (val >= mmax)
4995 return const_true_rtx;
4996 if (val < mmin)
4997 return const0_rtx;
4998 break;
5000 case EQ:
5001 /* x == y is always false for y out of range. */
5002 if (val < mmin || val > mmax)
5003 return const0_rtx;
5004 break;
5006 /* x > y is always false for y >= mmax, always true for y < mmin. */
5007 case GTU:
5008 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5009 return const0_rtx;
5010 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5011 return const_true_rtx;
5012 break;
5013 case GT:
5014 if (val >= mmax)
5015 return const0_rtx;
5016 if (val < mmin)
5017 return const_true_rtx;
5018 break;
5020 /* x < y is always false for y <= mmin, always true for y > mmax. */
5021 case LTU:
5022 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5023 return const0_rtx;
5024 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5025 return const_true_rtx;
5026 break;
5027 case LT:
5028 if (val <= mmin)
5029 return const0_rtx;
5030 if (val > mmax)
5031 return const_true_rtx;
5032 break;
5034 case NE:
5035 /* x != y is always true for y out of range. */
5036 if (val < mmin || val > mmax)
5037 return const_true_rtx;
5038 break;
5040 default:
5041 break;
5045 /* Optimize integer comparisons with zero. */
5046 if (trueop1 == const0_rtx)
5048 /* Some addresses are known to be nonzero. We don't know
5049 their sign, but equality comparisons are known. */
5050 if (nonzero_address_p (trueop0))
5052 if (code == EQ || code == LEU)
5053 return const0_rtx;
5054 if (code == NE || code == GTU)
5055 return const_true_rtx;
5058 /* See if the first operand is an IOR with a constant. If so, we
5059 may be able to determine the result of this comparison. */
5060 if (GET_CODE (op0) == IOR)
5062 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5063 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5065 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5066 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5067 && (UINTVAL (inner_const)
5068 & ((unsigned HOST_WIDE_INT) 1
5069 << sign_bitnum)));
5071 switch (code)
5073 case EQ:
5074 case LEU:
5075 return const0_rtx;
5076 case NE:
5077 case GTU:
5078 return const_true_rtx;
5079 case LT:
5080 case LE:
5081 if (has_sign)
5082 return const_true_rtx;
5083 break;
5084 case GT:
5085 case GE:
5086 if (has_sign)
5087 return const0_rtx;
5088 break;
5089 default:
5090 break;
5096 /* Optimize comparison of ABS with zero. */
5097 if (trueop1 == CONST0_RTX (mode)
5098 && (GET_CODE (trueop0) == ABS
5099 || (GET_CODE (trueop0) == FLOAT_EXTEND
5100 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5102 switch (code)
5104 case LT:
5105 /* Optimize abs(x) < 0.0. */
5106 if (!HONOR_SNANS (mode)
5107 && (!INTEGRAL_MODE_P (mode)
5108 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5110 if (INTEGRAL_MODE_P (mode)
5111 && (issue_strict_overflow_warning
5112 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5113 warning (OPT_Wstrict_overflow,
5114 ("assuming signed overflow does not occur when "
5115 "assuming abs (x) < 0 is false"));
5116 return const0_rtx;
5118 break;
5120 case GE:
5121 /* Optimize abs(x) >= 0.0. */
5122 if (!HONOR_NANS (mode)
5123 && (!INTEGRAL_MODE_P (mode)
5124 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5126 if (INTEGRAL_MODE_P (mode)
5127 && (issue_strict_overflow_warning
5128 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5129 warning (OPT_Wstrict_overflow,
5130 ("assuming signed overflow does not occur when "
5131 "assuming abs (x) >= 0 is true"));
5132 return const_true_rtx;
5134 break;
5136 case UNGE:
5137 /* Optimize ! (abs(x) < 0.0). */
5138 return const_true_rtx;
5140 default:
5141 break;
5145 return 0;
5148 /* Simplify CODE, an operation with result mode MODE and three operands,
5149 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5150 a constant. Return 0 if no simplification is possible. */
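/* Illustrative cases: (if_then_else (const_int 1) A B) reduces to A, and
   (fma (neg X) (neg Y) Z) is rewritten as (fma X Y Z) by folding away the
   paired negations.  */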
5153 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5154 machine_mode op0_mode, rtx op0, rtx op1,
5155 rtx op2)
5157 unsigned int width = GET_MODE_PRECISION (mode);
5158 bool any_change = false;
5159 rtx tem, trueop2;
5161 /* VOIDmode means "infinite" precision. */
5162 if (width == 0)
5163 width = HOST_BITS_PER_WIDE_INT;
5165 switch (code)
5167 case FMA:
5168 /* Simplify negations around the multiplication. */
5169 /* -a * -b + c => a * b + c. */
5170 if (GET_CODE (op0) == NEG)
5172 tem = simplify_unary_operation (NEG, mode, op1, mode);
5173 if (tem)
5174 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5176 else if (GET_CODE (op1) == NEG)
5178 tem = simplify_unary_operation (NEG, mode, op0, mode);
5179 if (tem)
5180 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5183 /* Canonicalize the two multiplication operands. */
5184 /* a * -b + c => -b * a + c. */
5185 if (swap_commutative_operands_p (op0, op1))
5186 std::swap (op0, op1), any_change = true;
5188 if (any_change)
5189 return gen_rtx_FMA (mode, op0, op1, op2);
5190 return NULL_RTX;
5192 case SIGN_EXTRACT:
5193 case ZERO_EXTRACT:
5194 if (CONST_INT_P (op0)
5195 && CONST_INT_P (op1)
5196 && CONST_INT_P (op2)
5197 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5198 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5200 /* Extracting a bit-field from a constant. */
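/* Illustrative case (with BITS_BIG_ENDIAN clear):
   (zero_extract (const_int 0x1234) (const_int 4) (const_int 8)) shifts the
   value right by 8 and masks it to 4 bits, yielding (const_int 2).  */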
5201 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5202 HOST_WIDE_INT op1val = INTVAL (op1);
5203 HOST_WIDE_INT op2val = INTVAL (op2);
5204 if (BITS_BIG_ENDIAN)
5205 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5206 else
5207 val >>= op2val;
5209 if (HOST_BITS_PER_WIDE_INT != op1val)
5211 /* First zero-extend. */
5212 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5213 /* If desired, propagate sign bit. */
5214 if (code == SIGN_EXTRACT
5215 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5216 != 0)
5217 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5220 return gen_int_mode (val, mode);
5222 break;
5224 case IF_THEN_ELSE:
5225 if (CONST_INT_P (op0))
5226 return op0 != const0_rtx ? op1 : op2;
5228 /* Convert c ? a : a into "a". */
5229 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5230 return op1;
5232 /* Convert a != b ? a : b into "a". */
5233 if (GET_CODE (op0) == NE
5234 && ! side_effects_p (op0)
5235 && ! HONOR_NANS (mode)
5236 && ! HONOR_SIGNED_ZEROS (mode)
5237 && ((rtx_equal_p (XEXP (op0, 0), op1)
5238 && rtx_equal_p (XEXP (op0, 1), op2))
5239 || (rtx_equal_p (XEXP (op0, 0), op2)
5240 && rtx_equal_p (XEXP (op0, 1), op1))))
5241 return op1;
5243 /* Convert a == b ? a : b into "b". */
5244 if (GET_CODE (op0) == EQ
5245 && ! side_effects_p (op0)
5246 && ! HONOR_NANS (mode)
5247 && ! HONOR_SIGNED_ZEROS (mode)
5248 && ((rtx_equal_p (XEXP (op0, 0), op1)
5249 && rtx_equal_p (XEXP (op0, 1), op2))
5250 || (rtx_equal_p (XEXP (op0, 0), op2)
5251 && rtx_equal_p (XEXP (op0, 1), op1))))
5252 return op2;
5254 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5256 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5257 ? GET_MODE (XEXP (op0, 1))
5258 : GET_MODE (XEXP (op0, 0)));
5259 rtx temp;
5261 /* Look for happy constants in op1 and op2. */
5262 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5264 HOST_WIDE_INT t = INTVAL (op1);
5265 HOST_WIDE_INT f = INTVAL (op2);
5267 if (t == STORE_FLAG_VALUE && f == 0)
5268 code = GET_CODE (op0);
5269 else if (t == 0 && f == STORE_FLAG_VALUE)
5271 enum rtx_code tmp;
5272 tmp = reversed_comparison_code (op0, NULL_RTX);
5273 if (tmp == UNKNOWN)
5274 break;
5275 code = tmp;
5277 else
5278 break;
5280 return simplify_gen_relational (code, mode, cmp_mode,
5281 XEXP (op0, 0), XEXP (op0, 1));
5284 if (cmp_mode == VOIDmode)
5285 cmp_mode = op0_mode;
5286 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5287 cmp_mode, XEXP (op0, 0),
5288 XEXP (op0, 1));
5290 /* See if any simplifications were possible. */
5291 if (temp)
5293 if (CONST_INT_P (temp))
5294 return temp == const0_rtx ? op2 : op1;
5295 else if (temp)
5296 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5299 break;
5301 case VEC_MERGE:
5302 gcc_assert (GET_MODE (op0) == mode);
5303 gcc_assert (GET_MODE (op1) == mode);
5304 gcc_assert (VECTOR_MODE_P (mode));
5305 trueop2 = avoid_constant_pool_reference (op2);
5306 if (CONST_INT_P (trueop2))
5308 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5309 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5310 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5311 unsigned HOST_WIDE_INT mask;
5312 if (n_elts == HOST_BITS_PER_WIDE_INT)
5313 mask = -1;
5314 else
5315 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5317 if (!(sel & mask) && !side_effects_p (op0))
5318 return op1;
5319 if ((sel & mask) == mask && !side_effects_p (op1))
5320 return op0;
5322 rtx trueop0 = avoid_constant_pool_reference (op0);
5323 rtx trueop1 = avoid_constant_pool_reference (op1);
5324 if (GET_CODE (trueop0) == CONST_VECTOR
5325 && GET_CODE (trueop1) == CONST_VECTOR)
5327 rtvec v = rtvec_alloc (n_elts);
5328 unsigned int i;
5330 for (i = 0; i < n_elts; i++)
5331 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5332 ? CONST_VECTOR_ELT (trueop0, i)
5333 : CONST_VECTOR_ELT (trueop1, i));
5334 return gen_rtx_CONST_VECTOR (mode, v);
5337 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5338 if no element from a appears in the result. */
5339 if (GET_CODE (op0) == VEC_MERGE)
5341 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5342 if (CONST_INT_P (tem))
5344 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5345 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5346 return simplify_gen_ternary (code, mode, mode,
5347 XEXP (op0, 1), op1, op2);
5348 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5349 return simplify_gen_ternary (code, mode, mode,
5350 XEXP (op0, 0), op1, op2);
5353 if (GET_CODE (op1) == VEC_MERGE)
5355 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5356 if (CONST_INT_P (tem))
5358 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5359 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5360 return simplify_gen_ternary (code, mode, mode,
5361 op0, XEXP (op1, 1), op2);
5362 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5363 return simplify_gen_ternary (code, mode, mode,
5364 op0, XEXP (op1, 0), op2);
5368 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5369 with a. */
5370 if (GET_CODE (op0) == VEC_DUPLICATE
5371 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5372 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5373 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5375 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5376 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5378 if (XEXP (XEXP (op0, 0), 0) == op1
5379 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5380 return op1;
5385 if (rtx_equal_p (op0, op1)
5386 && !side_effects_p (op2) && !side_effects_p (op1))
5387 return op0;
5389 break;
5391 default:
5392 gcc_unreachable ();
5395 return 0;
5398 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5399 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5400 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5402 Works by unpacking OP into a collection of 8-bit values
5403 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5404 and then repacking them again for OUTERMODE. */
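/* Illustrative case: an SImode subreg at byte 0 of a DImode integer
   constant is handled by unpacking the constant into the byte array,
   selecting the four bytes at the (endian-adjusted) offset, and repacking
   them through the MODE_INT path below into a new CONST_INT.  */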
5406 static rtx
5407 simplify_immed_subreg (machine_mode outermode, rtx op,
5408 machine_mode innermode, unsigned int byte)
5410 enum {
5411 value_bit = 8,
5412 value_mask = (1 << value_bit) - 1
5414 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5415 int value_start;
5416 int i;
5417 int elem;
5419 int num_elem;
5420 rtx * elems;
5421 int elem_bitsize;
5422 rtx result_s;
5423 rtvec result_v = NULL;
5424 enum mode_class outer_class;
5425 machine_mode outer_submode;
5426 int max_bitsize;
5428 /* Some ports misuse CCmode. */
5429 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5430 return op;
5432 /* We have no way to represent a complex constant at the rtl level. */
5433 if (COMPLEX_MODE_P (outermode))
5434 return NULL_RTX;
5436 /* We support any size mode. */
5437 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5438 GET_MODE_BITSIZE (innermode));
5440 /* Unpack the value. */
5442 if (GET_CODE (op) == CONST_VECTOR)
5444 num_elem = CONST_VECTOR_NUNITS (op);
5445 elems = &CONST_VECTOR_ELT (op, 0);
5446 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5448 else
5450 num_elem = 1;
5451 elems = &op;
5452 elem_bitsize = max_bitsize;
5454 /* If this asserts, it is too complicated; reducing value_bit may help. */
5455 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5456 /* I don't know how to handle endianness of sub-units. */
5457 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5459 for (elem = 0; elem < num_elem; elem++)
5461 unsigned char * vp;
5462 rtx el = elems[elem];
5464 /* Vectors are kept in target memory order. (This is probably
5465 a mistake.) */
5467 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5468 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5469 / BITS_PER_UNIT);
5470 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5471 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5472 unsigned bytele = (subword_byte % UNITS_PER_WORD
5473 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5474 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5477 switch (GET_CODE (el))
5479 case CONST_INT:
5480 for (i = 0;
5481 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5482 i += value_bit)
5483 *vp++ = INTVAL (el) >> i;
5484 /* CONST_INTs are always logically sign-extended. */
5485 for (; i < elem_bitsize; i += value_bit)
5486 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5487 break;
5489 case CONST_WIDE_INT:
5491 rtx_mode_t val = std::make_pair (el, innermode);
5492 unsigned char extend = wi::sign_mask (val);
5494 for (i = 0; i < elem_bitsize; i += value_bit)
5495 *vp++ = wi::extract_uhwi (val, i, value_bit);
5496 for (; i < elem_bitsize; i += value_bit)
5497 *vp++ = extend;
5499 break;
5501 case CONST_DOUBLE:
5502 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5504 unsigned char extend = 0;
5505 /* If this triggers, someone should have generated a
5506 CONST_INT instead. */
5507 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5509 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5510 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5511 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5513 *vp++
5514 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5515 i += value_bit;
5518 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5519 extend = -1;
5520 for (; i < elem_bitsize; i += value_bit)
5521 *vp++ = extend;
5523 else
5525 /* This is big enough for anything on the platform. */
5526 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5527 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5529 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5530 gcc_assert (bitsize <= elem_bitsize);
5531 gcc_assert (bitsize % value_bit == 0);
5533 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5534 GET_MODE (el));
5536 /* real_to_target produces its result in words affected by
5537 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5538 and use WORDS_BIG_ENDIAN instead; see the documentation
5539 of SUBREG in rtl.texi. */
5540 for (i = 0; i < bitsize; i += value_bit)
5542 int ibase;
5543 if (WORDS_BIG_ENDIAN)
5544 ibase = bitsize - 1 - i;
5545 else
5546 ibase = i;
5547 *vp++ = tmp[ibase / 32] >> i % 32;
5550 /* It shouldn't matter what's done here, so fill it with
5551 zero. */
5552 for (; i < elem_bitsize; i += value_bit)
5553 *vp++ = 0;
5555 break;
5557 case CONST_FIXED:
5558 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5560 for (i = 0; i < elem_bitsize; i += value_bit)
5561 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5563 else
5565 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5566 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5567 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5568 i += value_bit)
5569 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5570 >> (i - HOST_BITS_PER_WIDE_INT);
5571 for (; i < elem_bitsize; i += value_bit)
5572 *vp++ = 0;
5574 break;
5576 default:
5577 gcc_unreachable ();
5581 /* Now, pick the right byte to start with. */
5582 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5583 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5584 will already have offset 0. */
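/* Illustrative case: for an SImode subreg at byte 0 of a DImode inner
   value on a big-endian target, BYTE is renumbered to 4, since memory
   offset 0 holds the most significant half there, which sits at bytes
   4..7 of the little-endian value array.  */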
5585 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5587 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5588 - byte);
5589 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5590 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5591 byte = (subword_byte % UNITS_PER_WORD
5592 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5595 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5596 so if it's become negative it will instead be very large.) */
5597 gcc_assert (byte < GET_MODE_SIZE (innermode));
5599 /* Convert from bytes to chunks of size value_bit. */
5600 value_start = byte * (BITS_PER_UNIT / value_bit);
5602 /* Re-pack the value. */
5604 if (VECTOR_MODE_P (outermode))
5606 num_elem = GET_MODE_NUNITS (outermode);
5607 result_v = rtvec_alloc (num_elem);
5608 elems = &RTVEC_ELT (result_v, 0);
5609 outer_submode = GET_MODE_INNER (outermode);
5611 else
5613 num_elem = 1;
5614 elems = &result_s;
5615 outer_submode = outermode;
5618 outer_class = GET_MODE_CLASS (outer_submode);
5619 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5621 gcc_assert (elem_bitsize % value_bit == 0);
5622 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5624 for (elem = 0; elem < num_elem; elem++)
5626 unsigned char *vp;
5628 /* Vectors are stored in target memory order. (This is probably
5629 a mistake.) */
5631 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5632 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5633 / BITS_PER_UNIT);
5634 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5635 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5636 unsigned bytele = (subword_byte % UNITS_PER_WORD
5637 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5638 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5641 switch (outer_class)
5643 case MODE_INT:
5644 case MODE_PARTIAL_INT:
5646 int u;
5647 int base = 0;
5648 int units
5649 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5650 / HOST_BITS_PER_WIDE_INT;
5651 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5652 wide_int r;
5654 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5655 return NULL_RTX;
5656 for (u = 0; u < units; u++)
5658 unsigned HOST_WIDE_INT buf = 0;
5659 for (i = 0;
5660 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5661 i += value_bit)
5662 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5664 tmp[u] = buf;
5665 base += HOST_BITS_PER_WIDE_INT;
5667 r = wide_int::from_array (tmp, units,
5668 GET_MODE_PRECISION (outer_submode));
5669 #if TARGET_SUPPORTS_WIDE_INT == 0
5670 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5671 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5672 return NULL_RTX;
5673 #endif
5674 elems[elem] = immed_wide_int_const (r, outer_submode);
5676 break;
5678 case MODE_FLOAT:
5679 case MODE_DECIMAL_FLOAT:
5681 REAL_VALUE_TYPE r;
5682 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5684 /* real_from_target wants its input in words affected by
5685 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5686 and use WORDS_BIG_ENDIAN instead; see the documentation
5687 of SUBREG in rtl.texi. */
5688 for (i = 0; i < max_bitsize / 32; i++)
5689 tmp[i] = 0;
5690 for (i = 0; i < elem_bitsize; i += value_bit)
5692 int ibase;
5693 if (WORDS_BIG_ENDIAN)
5694 ibase = elem_bitsize - 1 - i;
5695 else
5696 ibase = i;
5697 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5700 real_from_target (&r, tmp, outer_submode);
5701 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5703 break;
5705 case MODE_FRACT:
5706 case MODE_UFRACT:
5707 case MODE_ACCUM:
5708 case MODE_UACCUM:
5710 FIXED_VALUE_TYPE f;
5711 f.data.low = 0;
5712 f.data.high = 0;
5713 f.mode = outer_submode;
5715 for (i = 0;
5716 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5717 i += value_bit)
5718 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5719 for (; i < elem_bitsize; i += value_bit)
5720 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5721 << (i - HOST_BITS_PER_WIDE_INT));
5723 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5725 break;
5727 default:
5728 gcc_unreachable ();
5731 if (VECTOR_MODE_P (outermode))
5732 return gen_rtx_CONST_VECTOR (outermode, result_v);
5733 else
5734 return result_s;
5737 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5738 Return 0 if no simplifications are possible. */
5739 rtx
5740 simplify_subreg (machine_mode outermode, rtx op,
5741 machine_mode innermode, unsigned int byte)
5743 /* Little bit of sanity checking. */
5744 gcc_assert (innermode != VOIDmode);
5745 gcc_assert (outermode != VOIDmode);
5746 gcc_assert (innermode != BLKmode);
5747 gcc_assert (outermode != BLKmode);
5749 gcc_assert (GET_MODE (op) == innermode
5750 || GET_MODE (op) == VOIDmode);
5752 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5753 return NULL_RTX;
5755 if (byte >= GET_MODE_SIZE (innermode))
5756 return NULL_RTX;
5758 if (outermode == innermode && !byte)
5759 return op;
5761 if (CONST_SCALAR_INT_P (op)
5762 || CONST_DOUBLE_AS_FLOAT_P (op)
5763 || GET_CODE (op) == CONST_FIXED
5764 || GET_CODE (op) == CONST_VECTOR)
5765 return simplify_immed_subreg (outermode, op, innermode, byte);
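/* For instance (little-endian assumed for illustration), with SImode
   INNERMODE, (subreg:HI (const_int 0x12345678) 0) is folded by
   simplify_immed_subreg to (const_int 0x5678).  */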
5767 /* Changing mode twice with SUBREG => just change it once,
5768 or not at all if changing back to the starting mode. */
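/* Purely illustrative, assuming a little-endian target:
   (subreg:QI (subreg:HI (reg:SI 100) 0) 0) can become
   (subreg:QI (reg:SI 100) 0), while
   (subreg:SI (subreg:HI (reg:SI 100) 0) 0) simply becomes
   (reg:SI 100) again, subject to the offset and validate_subreg
   checks below.  */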
5769 if (GET_CODE (op) == SUBREG)
5771 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5772 int final_offset = byte + SUBREG_BYTE (op);
5773 rtx newx;
5775 if (outermode == innermostmode
5776 && byte == 0 && SUBREG_BYTE (op) == 0)
5777 return SUBREG_REG (op);
5779 /* The SUBREG_BYTE represents the offset, as if the value were stored
5780 in memory. An irritating exception is the paradoxical subreg, where
5781 we define SUBREG_BYTE to be 0; on big-endian machines the true
5782 value would be negative. For a moment, undo this exception. */
5783 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5785 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5786 if (WORDS_BIG_ENDIAN)
5787 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5788 if (BYTES_BIG_ENDIAN)
5789 final_offset += difference % UNITS_PER_WORD;
5791 if (SUBREG_BYTE (op) == 0
5792 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5794 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5795 if (WORDS_BIG_ENDIAN)
5796 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5797 if (BYTES_BIG_ENDIAN)
5798 final_offset += difference % UNITS_PER_WORD;
5801 /* See whether resulting subreg will be paradoxical. */
5802 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5804 /* In nonparadoxical subregs we can't handle negative offsets. */
5805 if (final_offset < 0)
5806 return NULL_RTX;
5807 /* Bail out in case resulting subreg would be incorrect. */
5808 if (final_offset % GET_MODE_SIZE (outermode)
5809 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5810 return NULL_RTX;
5812 else
5814 int offset = 0;
5815 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5817 /* In a paradoxical subreg, see if we are still looking at the lower
5818 part. If so, our SUBREG_BYTE will be 0. */
5819 if (WORDS_BIG_ENDIAN)
5820 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5821 if (BYTES_BIG_ENDIAN)
5822 offset += difference % UNITS_PER_WORD;
5823 if (offset == final_offset)
5824 final_offset = 0;
5825 else
5826 return NULL_RTX;
5829 /* Recurse for further possible simplifications. */
5830 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5831 final_offset);
5832 if (newx)
5833 return newx;
5834 if (validate_subreg (outermode, innermostmode,
5835 SUBREG_REG (op), final_offset))
5837 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5838 if (SUBREG_PROMOTED_VAR_P (op)
5839 && SUBREG_PROMOTED_SIGN (op) >= 0
5840 && GET_MODE_CLASS (outermode) == MODE_INT
5841 && IN_RANGE (GET_MODE_SIZE (outermode),
5842 GET_MODE_SIZE (innermode),
5843 GET_MODE_SIZE (innermostmode))
5844 && subreg_lowpart_p (newx))
5846 SUBREG_PROMOTED_VAR_P (newx) = 1;
5847 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5849 return newx;
5851 return NULL_RTX;
5854 /* SUBREG of a hard register => just change the register number
5855 and/or mode. If the hard register is not valid in that mode,
5856 suppress this simplification. If the hard register is the stack,
5857 frame, or argument pointer, leave this as a SUBREG. */
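/* Assumed example (exact numbers depend on the target's register
   layout and endianness): on a target with 32-bit hard registers,
   (subreg:SI (reg:DI 10) 4) can be rewritten as (reg:SI 11) when
   simplify_subreg_regno reports that register/mode pair as valid.  */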
5859 if (REG_P (op) && HARD_REGISTER_P (op))
5861 unsigned int regno, final_regno;
5863 regno = REGNO (op);
5864 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5865 if (HARD_REGISTER_NUM_P (final_regno))
5867 rtx x;
5868 int final_offset = byte;
5870 /* Adjust offset for paradoxical subregs. */
5871 if (byte == 0
5872 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5874 int difference = (GET_MODE_SIZE (innermode)
5875 - GET_MODE_SIZE (outermode));
5876 if (WORDS_BIG_ENDIAN)
5877 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5878 if (BYTES_BIG_ENDIAN)
5879 final_offset += difference % UNITS_PER_WORD;
5882 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5884 /* Propagate the original regno. We don't have any way to specify
5885 an offset inside the original regno, so do so only for the lowpart.
5886 The information is used only by alias analysis, which cannot
5887 grok partial registers anyway. */
5889 if (subreg_lowpart_offset (outermode, innermode) == byte)
5890 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5891 return x;
5895 /* If we have a SUBREG of a register that we are replacing and we are
5896 replacing it with a MEM, make a new MEM and try replacing the
5897 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5898 or if we would be widening it. */
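/* Rough sketch of the effect, assuming a 64-bit pointer register: a
   non-volatile (subreg:SI (mem:DI (reg:DI 100)) 4) whose address is
   not mode-dependent becomes approximately
   (mem:SI (plus:DI (reg:DI 100) (const_int 4))) via adjust_address_nv.  */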
5900 if (MEM_P (op)
5901 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5902 /* Allow splitting of volatile memory references in case we don't
5903 have an instruction to move the whole thing. */
5904 && (! MEM_VOLATILE_P (op)
5905 || ! have_insn_for (SET, innermode))
5906 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5907 return adjust_address_nv (op, outermode, byte);
5909 /* Handle complex values represented as CONCAT
5910 of real and imaginary part. */
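/* For example (illustrative), with SCmode represented as a CONCAT of
   two SFmode parts, (subreg:SF (concat:SC RE IM) 4) selects the
   imaginary part IM: PART_SIZE is 4 and BYTE falls in the second
   half.  */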
5911 if (GET_CODE (op) == CONCAT)
5913 unsigned int part_size, final_offset;
5914 rtx part, res;
5916 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5917 if (byte < part_size)
5919 part = XEXP (op, 0);
5920 final_offset = byte;
5922 else
5924 part = XEXP (op, 1);
5925 final_offset = byte - part_size;
5928 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5929 return NULL_RTX;
5931 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5932 if (res)
5933 return res;
5934 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5935 return gen_rtx_SUBREG (outermode, part, final_offset);
5936 return NULL_RTX;
5939 /* A SUBREG resulting from a zero extension may fold to zero if
5940 it extracts higher bits than the ZERO_EXTEND's source bits. */
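/* E.g. (assuming a little-endian target for the offset),
   (subreg:SI (zero_extend:DI (reg:SI 100)) 4) reads only bits that
   the zero extension is known to have cleared, so it folds to
   (const_int 0).  */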
5941 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5943 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5944 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5945 return CONST0_RTX (outermode);
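/* Illustrative case for the truncation fallback below, assuming a
   little-endian target: (subreg:SI (sign_extend:DI (reg:SI 100)) 0)
   is the lowpart of the extension, and simplify_truncation can
   reduce it straight back to (reg:SI 100).  */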
5948 if (SCALAR_INT_MODE_P (outermode)
5949 && SCALAR_INT_MODE_P (innermode)
5950 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5951 && byte == subreg_lowpart_offset (outermode, innermode))
5953 rtx tem = simplify_truncation (outermode, op, innermode);
5954 if (tem)
5955 return tem;
5958 return NULL_RTX;
5961 /* Make a SUBREG operation or equivalent if it folds. */
5963 rtx
5964 simplify_gen_subreg (machine_mode outermode, rtx op,
5965 machine_mode innermode, unsigned int byte)
5967 rtx newx;
5969 newx = simplify_subreg (outermode, op, innermode, byte);
5970 if (newx)
5971 return newx;
5973 if (GET_CODE (op) == SUBREG
5974 || GET_CODE (op) == CONCAT
5975 || GET_MODE (op) == VOIDmode)
5976 return NULL_RTX;
5978 if (validate_subreg (outermode, innermode, op, byte))
5979 return gen_rtx_SUBREG (outermode, op, byte);
5981 return NULL_RTX;
5984 /* Simplify X, an rtx expression.
5986 Return the simplified expression or NULL if no simplifications
5987 were possible.
5989 This is the preferred entry point into the simplification routines;
5990 however, we still allow passes to call the more specific routines.
5992 Right now GCC has three (yes, three) major bodies of RTL simplification
5993 code that need to be unified.
5995 1. fold_rtx in cse.c. This code uses various CSE specific
5996 information to aid in RTL simplification.
5998 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5999 it uses combine specific information to aid in RTL
6000 simplification.
6002 3. The routines in this file.
6005 Long term we want to have only one body of simplification code; to
6006 get to that state I recommend the following steps:
6008 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6009 which do not depend on pass-specific state into these routines.
6011 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6012 use this routine whenever possible.
6014 3. Allow for pass dependent state to be provided to these
6015 routines and add simplifications based on the pass dependent
6016 state. Remove code from cse.c & combine.c that becomes
6017 redundant/dead.
6019 It will take time, but ultimately the compiler will be easier to
6020 maintain and improve. It's totally silly that when we add a
6021 simplification it needs to be added to 4 places (3 for RTL
6022 simplification and 1 for tree simplification). */
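/* A hedged usage sketch (the fold shown is one simplify_binary_operation
   already performs): calling simplify_rtx on
   (plus:SI (reg:SI 100) (const_int 0)) reaches
   simplify_binary_operation, which returns (reg:SI 100); when nothing
   applies, NULL is returned and the caller keeps the original
   expression.  */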
6024 rtx
6025 simplify_rtx (const_rtx x)
6027 const enum rtx_code code = GET_CODE (x);
6028 const machine_mode mode = GET_MODE (x);
6030 switch (GET_RTX_CLASS (code))
6032 case RTX_UNARY:
6033 return simplify_unary_operation (code, mode,
6034 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6035 case RTX_COMM_ARITH:
6036 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6037 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6039 /* Fall through.... */
6041 case RTX_BIN_ARITH:
6042 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6044 case RTX_TERNARY:
6045 case RTX_BITFIELD_OPS:
6046 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6047 XEXP (x, 0), XEXP (x, 1),
6048 XEXP (x, 2));
6050 case RTX_COMPARE:
6051 case RTX_COMM_COMPARE:
6052 return simplify_relational_operation (code, mode,
6053 ((GET_MODE (XEXP (x, 0))
6054 != VOIDmode)
6055 ? GET_MODE (XEXP (x, 0))
6056 : GET_MODE (XEXP (x, 1))),
6057 XEXP (x, 0),
6058 XEXP (x, 1));
6060 case RTX_EXTRA:
6061 if (code == SUBREG)
6062 return simplify_subreg (mode, SUBREG_REG (x),
6063 GET_MODE (SUBREG_REG (x)),
6064 SUBREG_BYTE (x));
6065 break;
6067 case RTX_OBJ:
6068 if (code == LO_SUM)
6070 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6071 if (GET_CODE (XEXP (x, 0)) == HIGH
6072 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6073 return XEXP (x, 1);
6075 break;
6077 default:
6078 break;
6080 return NULL;