/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
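
/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) expands to
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42) to
   (HOST_WIDE_INT) 0; i.e. it yields the high half that a
   double-width sign extension of LOW would produce.  */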

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
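
/* For example, neg_const_int (SImode, (const_int 5)) yields
   (const_int -5), while the maximally negative (const_int
   -2147483648) truncates back to itself.  */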

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
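
/* For example, mode_signbit_p (SImode, x) holds exactly when X is
   (const_int -2147483648): after masking to the 32-bit precision of
   SImode, only bit 31 is set.  */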

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
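
/* For example, with SImode (sign bit 31):

     val_signbit_p (SImode, 0x80000000)              is true,
     val_signbit_known_set_p (SImode, 0xfff00000)    is true,
     val_signbit_known_clear_p (SImode, 0x7fffffff)  is true.  */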

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
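
/* Usage sketch: assuming REG is a hypothetical (reg:SI 100),

     simplify_gen_binary (PLUS, SImode, reg, const0_rtx)

   returns REG itself, because simplify_binary_operation folds
   (plus:SI X (const_int 0)) to X; when nothing folds, the call
   merely builds (plus:SI ...) via gen_rtx_fmt_ee.  */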

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
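
/* For example, a (mem:SF (symbol_ref ...)) whose address is a
   CONSTANT_POOL_ADDRESS_P symbol whose pool entry holds 1.0 is
   replaced by the CONST_DOUBLE for 1.0, letting callers fold
   through constant-pool loads.  */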

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
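
/* For example, simplify_gen_unary (NEG, SImode, GEN_INT (5), SImode)
   folds to (const_int -5), while a non-constant operand typically
   just produces (neg:SI ...).  */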

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
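
/* Usage sketch: with X = (plus:SI (reg:SI 100) (const_int 1)) and
   REG a hypothetical (reg:SI 100),

     simplify_replace_rtx (x, reg, GEN_INT (4))

   substitutes (const_int 4) for the register and refolds the PLUS
   through simplify_gen_binary, yielding (const_int 5) rather than a
   PLUS of two constants.  */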

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
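
/* Two illustrative instances of the rules above:

     (truncate:SI (zero_extend:DI (reg:SI 100)))
       --> (reg:SI 100)

     (truncate:SI (plus:DI (reg:DI 100) (reg:DI 101)))
       --> (plus:SI (truncate:SI (reg:DI 100))
		    (truncate:SI (reg:DI 101)))

   where the inner truncations are themselves formed with
   simplify_gen_unary (TRUNCATE, ...) as described above.  */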

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned) significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>)  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
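
/* For example, simplify_unary_operation (NOT, SImode, X, SImode)
   returns the operand of X by the (not (not X)) rule above when X is
   a hypothetical (not:SI (reg:SI 100)); a NULL return means that no
   simplification applied.  */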

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
		      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }
1929 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1930 && SCALAR_FLOAT_MODE_P (mode)
1931 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1933 REAL_VALUE_TYPE d, t;
1934 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1936 switch (code)
1938 case SQRT:
1939 if (HONOR_SNANS (mode) && real_isnan (&d))
1940 return 0;
1941 real_sqrt (&t, mode, &d);
1942 d = t;
1943 break;
1944 case ABS:
1945 d = real_value_abs (&d);
1946 break;
1947 case NEG:
1948 d = real_value_negate (&d);
1949 break;
1950 case FLOAT_TRUNCATE:
1951 d = real_value_truncate (mode, d);
1952 break;
1953 case FLOAT_EXTEND:
1954 /* All this does is change the mode, unless changing
1955 mode class. */
1956 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1957 real_convert (&d, mode, &d);
1958 break;
1959 case FIX:
1960 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1961 break;
1962 case NOT:
1964 long tmp[4];
1965 int i;
1967 real_to_target (tmp, &d, GET_MODE (op));
1968 for (i = 0; i < 4; i++)
1969 tmp[i] = ~tmp[i];
1970 real_from_target (&d, tmp, mode);
1971 break;
1973 default:
1974 gcc_unreachable ();
1976 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1979 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1980 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1981 && GET_MODE_CLASS (mode) == MODE_INT
1982 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1984 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1985 operators are intentionally left unspecified (to ease implementation
1986 by target backends), for consistency, this routine implements the
1987 same semantics for constant folding as used by the middle-end. */
1989 /* This was formerly used only for non-IEEE float.
1990 eggert@twinsun.com says it is safe for IEEE also. */
1991 HOST_WIDE_INT xh, xl, th, tl;
1992 REAL_VALUE_TYPE x, t;
1993 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1994 switch (code)
1996 case FIX:
1997 if (REAL_VALUE_ISNAN (x))
1998 return const0_rtx;
2000 /* Test against the signed upper bound. */
2001 if (width > HOST_BITS_PER_WIDE_INT)
2003 th = ((unsigned HOST_WIDE_INT) 1
2004 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2005 tl = -1;
2007 else
2009 th = 0;
2010 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2012 real_from_integer (&t, VOIDmode, tl, th, 0);
2013 if (REAL_VALUES_LESS (t, x))
2015 xh = th;
2016 xl = tl;
2017 break;
2020 /* Test against the signed lower bound. */
2021 if (width > HOST_BITS_PER_WIDE_INT)
2023 th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
2024 tl = 0;
2026 else
2028 th = -1;
2029 tl = HOST_WIDE_INT_M1U << (width - 1);
2031 real_from_integer (&t, VOIDmode, tl, th, 0);
2032 if (REAL_VALUES_LESS (x, t))
2034 xh = th;
2035 xl = tl;
2036 break;
2038 REAL_VALUE_TO_INT (&xl, &xh, x);
2039 break;
2041 case UNSIGNED_FIX:
2042 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2043 return const0_rtx;
2045 /* Test against the unsigned upper bound. */
2046 if (width == HOST_BITS_PER_DOUBLE_INT)
2048 th = -1;
2049 tl = -1;
2051 else if (width >= HOST_BITS_PER_WIDE_INT)
2053 th = ((unsigned HOST_WIDE_INT) 1
2054 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2055 tl = -1;
2057 else
2059 th = 0;
2060 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2062 real_from_integer (&t, VOIDmode, tl, th, 1);
2063 if (REAL_VALUES_LESS (t, x))
2065 xh = th;
2066 xl = tl;
2067 break;
2070 REAL_VALUE_TO_INT (&xl, &xh, x);
2071 break;
2073 default:
2074 gcc_unreachable ();
2076 return immed_double_const (xl, xh, mode);
2079 return NULL_RTX;
2082 /* Subroutine of simplify_binary_operation to simplify a binary operation
2083 CODE that can commute with byte swapping, with result mode MODE and
2084 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2085 Return zero if no simplification or canonicalization is possible. */
2087 static rtx
2088 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2089 rtx op0, rtx op1)
2091 rtx tem;
2093 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2094 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2096 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2097 simplify_gen_unary (BSWAP, mode, op1, mode));
2098 return simplify_gen_unary (BSWAP, mode, tem, mode);
2101 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2102 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2104 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2105 return simplify_gen_unary (BSWAP, mode, tem, mode);
2108 return NULL_RTX;
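/* Illustration of the bswap commutation above (added example): in
   SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), since applying BSWAP to
   the constant moves the low-byte mask to the high byte.  */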
2111 /* Subroutine of simplify_binary_operation to simplify a commutative,
2112 associative binary operation CODE with result mode MODE, operating
2113 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2114 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2115 canonicalization is possible. */
2117 static rtx
2118 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2119 rtx op0, rtx op1)
2121 rtx tem;
2123 /* Linearize the operator to the left. */
2124 if (GET_CODE (op1) == code)
2126 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2127 if (GET_CODE (op0) == code)
2129 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2130 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2133 /* "a op (b op c)" becomes "(b op c) op a". */
2134 if (! swap_commutative_operands_p (op1, op0))
2135 return simplify_gen_binary (code, mode, op1, op0);
2137 tem = op0;
2138 op0 = op1;
2139 op1 = tem;
2142 if (GET_CODE (op0) == code)
2144 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2145 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2147 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2148 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2151 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2152 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2153 if (tem != 0)
2154 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2156 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2157 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2158 if (tem != 0)
2159 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2162 return 0;
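/* Worked example of the canonicalization in
   simplify_associative_operation above (illustrative):
   (plus (plus x (const_int 5)) y) has its constant pushed last,
   giving (plus (plus x y) (const_int 5)), so constants can combine
   with other constants in later passes.  */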
2166 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2167 and OP1. Return 0 if no simplification is possible.
2169 Don't use this for relational operations such as EQ or LT.
2170 Use simplify_relational_operation instead. */
2172 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2173 rtx op0, rtx op1)
2175 rtx trueop0, trueop1;
2176 rtx tem;
2178 /* Relational operations don't work here. We must know the mode
2179 of the operands in order to do the comparison correctly.
2180 Assuming a full word can give incorrect results.
2181 Consider comparing 128 with -128 in QImode. */
2182 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2183 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2185 /* Make sure the constant is second. */
2186 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2187 && swap_commutative_operands_p (op0, op1))
2189 tem = op0, op0 = op1, op1 = tem;
2192 trueop0 = avoid_constant_pool_reference (op0);
2193 trueop1 = avoid_constant_pool_reference (op1);
2195 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2196 if (tem)
2197 return tem;
2198 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2201 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2202 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2203 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2204 actual constants. */
2206 static rtx
2207 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2208 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2210 rtx tem, reversed, opleft, opright;
2211 HOST_WIDE_INT val;
2212 unsigned int width = GET_MODE_PRECISION (mode);
2214 /* Even if we can't compute a constant result,
2215 there are some cases worth simplifying. */
2217 switch (code)
2219 case PLUS:
2220 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2221 when x is NaN, infinite, or finite and nonzero. They aren't
2222 when x is -0 and the rounding mode is not towards -infinity,
2223 since (-0) + 0 is then 0. */
2224 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2225 return op0;
2227 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2228 transformations are safe even for IEEE. */
2229 if (GET_CODE (op0) == NEG)
2230 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2231 else if (GET_CODE (op1) == NEG)
2232 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2234 /* (~a) + 1 -> -a */
2235 if (INTEGRAL_MODE_P (mode)
2236 && GET_CODE (op0) == NOT
2237 && trueop1 == const1_rtx)
2238 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
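/* Two's-complement identity behind the transformation above:
   for any integer a, -a == ~a + 1; e.g. a = 5 gives ~5 = -6 and
   -6 + 1 = -5.  */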
2240 /* Handle both-operands-constant cases. We can only add
2241 CONST_INTs to constants since the sum of relocatable symbols
2242 can't be handled by most assemblers. Don't add CONST_INT
2243 to CONST_INT since overflow won't be computed properly if wider
2244 than HOST_BITS_PER_WIDE_INT. */
2246 if ((GET_CODE (op0) == CONST
2247 || GET_CODE (op0) == SYMBOL_REF
2248 || GET_CODE (op0) == LABEL_REF)
2249 && CONST_INT_P (op1))
2250 return plus_constant (mode, op0, INTVAL (op1));
2251 else if ((GET_CODE (op1) == CONST
2252 || GET_CODE (op1) == SYMBOL_REF
2253 || GET_CODE (op1) == LABEL_REF)
2254 && CONST_INT_P (op0))
2255 return plus_constant (mode, op1, INTVAL (op0));
2257 /* See if this is something like X * C - X or vice versa or
2258 if the multiplication is written as a shift. If so, we can
2259 distribute and make a new multiply, shift, or maybe just
2260 have X (if C is 2 in the example above). But don't make
2261 something more expensive than we had before. */
2263 if (SCALAR_INT_MODE_P (mode))
2265 double_int coeff0, coeff1;
2266 rtx lhs = op0, rhs = op1;
2268 coeff0 = double_int_one;
2269 coeff1 = double_int_one;
2271 if (GET_CODE (lhs) == NEG)
2273 coeff0 = double_int_minus_one;
2274 lhs = XEXP (lhs, 0);
2276 else if (GET_CODE (lhs) == MULT
2277 && CONST_INT_P (XEXP (lhs, 1)))
2279 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2280 lhs = XEXP (lhs, 0);
2282 else if (GET_CODE (lhs) == ASHIFT
2283 && CONST_INT_P (XEXP (lhs, 1))
2284 && INTVAL (XEXP (lhs, 1)) >= 0
2285 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2287 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2288 lhs = XEXP (lhs, 0);
2291 if (GET_CODE (rhs) == NEG)
2293 coeff1 = double_int_minus_one;
2294 rhs = XEXP (rhs, 0);
2296 else if (GET_CODE (rhs) == MULT
2297 && CONST_INT_P (XEXP (rhs, 1)))
2299 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2300 rhs = XEXP (rhs, 0);
2302 else if (GET_CODE (rhs) == ASHIFT
2303 && CONST_INT_P (XEXP (rhs, 1))
2304 && INTVAL (XEXP (rhs, 1)) >= 0
2305 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2307 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2308 rhs = XEXP (rhs, 0);
2311 if (rtx_equal_p (lhs, rhs))
2313 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2314 rtx coeff;
2315 double_int val;
2316 bool speed = optimize_function_for_speed_p (cfun);
2318 val = coeff0 + coeff1;
2319 coeff = immed_double_int_const (val, mode);
2321 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2322 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2323 ? tem : 0;
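/* Illustration (added example): for (plus (mult x (const_int 2)) x),
   coeff0 = 2 and coeff1 = 1, so the sum is rewritten as
   (mult x (const_int 3)) when that is no more costly; likewise
   (plus (ashift x (const_int 2)) x) uses coeff0 = 4 and becomes
   (mult x (const_int 5)).  */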
2327 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2328 if (CONST_SCALAR_INT_P (op1)
2329 && GET_CODE (op0) == XOR
2330 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2331 && mode_signbit_p (mode, op1))
2332 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2333 simplify_gen_binary (XOR, mode, op1,
2334 XEXP (op0, 1)));
2336 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2337 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2338 && GET_CODE (op0) == MULT
2339 && GET_CODE (XEXP (op0, 0)) == NEG)
2341 rtx in1, in2;
2343 in1 = XEXP (XEXP (op0, 0), 0);
2344 in2 = XEXP (op0, 1);
2345 return simplify_gen_binary (MINUS, mode, op1,
2346 simplify_gen_binary (MULT, mode,
2347 in1, in2));
2350 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2351 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2352 is 1. */
2353 if (COMPARISON_P (op0)
2354 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2355 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2356 && (reversed = reversed_comparison (op0, mode)))
2357 return
2358 simplify_gen_unary (NEG, mode, reversed, mode);
2360 /* If one of the operands is a PLUS or a MINUS, see if we can
2361 simplify this by the associative law.
2362 Don't use the associative law for floating point.
2363 The inaccuracy makes it nonassociative,
2364 and subtle programs can break if operations are associated. */
2366 if (INTEGRAL_MODE_P (mode)
2367 && (plus_minus_operand_p (op0)
2368 || plus_minus_operand_p (op1))
2369 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2370 return tem;
2372 /* Reassociate floating point addition only when the user
2373 specifies associative math operations. */
2374 if (FLOAT_MODE_P (mode)
2375 && flag_associative_math)
2377 tem = simplify_associative_operation (code, mode, op0, op1);
2378 if (tem)
2379 return tem;
2381 break;
2383 case COMPARE:
2384 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2385 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2386 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2387 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2389 rtx xop00 = XEXP (op0, 0);
2390 rtx xop10 = XEXP (op1, 0);
2392 #ifdef HAVE_cc0
2393 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2394 #else
2395 if (REG_P (xop00) && REG_P (xop10)
2396 && GET_MODE (xop00) == GET_MODE (xop10)
2397 && REGNO (xop00) == REGNO (xop10)
2398 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2399 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2400 #endif
2401 return xop00;
2403 break;
2405 case MINUS:
2406 /* We can't assume x-x is 0 even with non-IEEE floating point,
2407 but since it is zero except in very strange circumstances, we
2408 will treat it as zero with -ffinite-math-only. */
2409 if (rtx_equal_p (trueop0, trueop1)
2410 && ! side_effects_p (op0)
2411 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2412 return CONST0_RTX (mode);
2414 /* Change subtraction from zero into negation. (0 - x) is the
2415 same as -x when x is NaN, infinite, or finite and nonzero.
2416 But if the mode has signed zeros, and does not round towards
2417 -infinity, then 0 - 0 is 0, not -0. */
2418 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2419 return simplify_gen_unary (NEG, mode, op1, mode);
2421 /* (-1 - a) is ~a. */
2422 if (trueop0 == constm1_rtx)
2423 return simplify_gen_unary (NOT, mode, op1, mode);
2425 /* Subtracting 0 has no effect unless the mode has signed zeros
2426 and supports rounding towards -infinity. In such a case,
2427 0 - 0 is -0. */
2428 if (!(HONOR_SIGNED_ZEROS (mode)
2429 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2430 && trueop1 == CONST0_RTX (mode))
2431 return op0;
2433 /* See if this is something like X * C - X or vice versa or
2434 if the multiplication is written as a shift. If so, we can
2435 distribute and make a new multiply, shift, or maybe just
2436 have X (if C is 2 in the example above). But don't make
2437 something more expensive than we had before. */
2439 if (SCALAR_INT_MODE_P (mode))
2441 double_int coeff0, negcoeff1;
2442 rtx lhs = op0, rhs = op1;
2444 coeff0 = double_int_one;
2445 negcoeff1 = double_int_minus_one;
2447 if (GET_CODE (lhs) == NEG)
2449 coeff0 = double_int_minus_one;
2450 lhs = XEXP (lhs, 0);
2452 else if (GET_CODE (lhs) == MULT
2453 && CONST_INT_P (XEXP (lhs, 1)))
2455 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2456 lhs = XEXP (lhs, 0);
2458 else if (GET_CODE (lhs) == ASHIFT
2459 && CONST_INT_P (XEXP (lhs, 1))
2460 && INTVAL (XEXP (lhs, 1)) >= 0
2461 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2463 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2464 lhs = XEXP (lhs, 0);
2467 if (GET_CODE (rhs) == NEG)
2469 negcoeff1 = double_int_one;
2470 rhs = XEXP (rhs, 0);
2472 else if (GET_CODE (rhs) == MULT
2473 && CONST_INT_P (XEXP (rhs, 1)))
2475 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2476 rhs = XEXP (rhs, 0);
2478 else if (GET_CODE (rhs) == ASHIFT
2479 && CONST_INT_P (XEXP (rhs, 1))
2480 && INTVAL (XEXP (rhs, 1)) >= 0
2481 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2483 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2484 negcoeff1 = -negcoeff1;
2485 rhs = XEXP (rhs, 0);
2488 if (rtx_equal_p (lhs, rhs))
2490 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2491 rtx coeff;
2492 double_int val;
2493 bool speed = optimize_function_for_speed_p (cfun);
2495 val = coeff0 + negcoeff1;
2496 coeff = immed_double_int_const (val, mode);
2498 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2499 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2500 ? tem : 0;
2504 /* (a - (-b)) -> (a + b). True even for IEEE. */
2505 if (GET_CODE (op1) == NEG)
2506 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2508 /* (-x - c) may be simplified as (-c - x). */
2509 if (GET_CODE (op0) == NEG
2510 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2512 tem = simplify_unary_operation (NEG, mode, op1, mode);
2513 if (tem)
2514 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2517 /* Don't let a relocatable value get a negative coeff. */
2518 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2519 return simplify_gen_binary (PLUS, mode,
2520 op0,
2521 neg_const_int (mode, op1));
2523 /* (x - (x & y)) -> (x & ~y) */
2524 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2526 if (rtx_equal_p (op0, XEXP (op1, 0)))
2528 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2529 GET_MODE (XEXP (op1, 1)));
2530 return simplify_gen_binary (AND, mode, op0, tem);
2532 if (rtx_equal_p (op0, XEXP (op1, 1)))
2534 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2535 GET_MODE (XEXP (op1, 0)));
2536 return simplify_gen_binary (AND, mode, op0, tem);
2540 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2541 by reversing the comparison code if valid. */
2542 if (STORE_FLAG_VALUE == 1
2543 && trueop0 == const1_rtx
2544 && COMPARISON_P (op1)
2545 && (reversed = reversed_comparison (op1, mode)))
2546 return reversed;
2548 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2549 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2550 && GET_CODE (op1) == MULT
2551 && GET_CODE (XEXP (op1, 0)) == NEG)
2553 rtx in1, in2;
2555 in1 = XEXP (XEXP (op1, 0), 0);
2556 in2 = XEXP (op1, 1);
2557 return simplify_gen_binary (PLUS, mode,
2558 simplify_gen_binary (MULT, mode,
2559 in1, in2),
2560 op0);
2563 /* Canonicalize (minus (neg A) (mult B C)) to
2564 (minus (mult (neg B) C) A). */
2565 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2566 && GET_CODE (op1) == MULT
2567 && GET_CODE (op0) == NEG)
2569 rtx in1, in2;
2571 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2572 in2 = XEXP (op1, 1);
2573 return simplify_gen_binary (MINUS, mode,
2574 simplify_gen_binary (MULT, mode,
2575 in1, in2),
2576 XEXP (op0, 0));
2579 /* If one of the operands is a PLUS or a MINUS, see if we can
2580 simplify this by the associative law. This will, for example,
2581 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2582 Don't use the associative law for floating point.
2583 The inaccuracy makes it nonassociative,
2584 and subtle programs can break if operations are associated. */
2586 if (INTEGRAL_MODE_P (mode)
2587 && (plus_minus_operand_p (op0)
2588 || plus_minus_operand_p (op1))
2589 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2590 return tem;
2591 break;
2593 case MULT:
2594 if (trueop1 == constm1_rtx)
2595 return simplify_gen_unary (NEG, mode, op0, mode);
2597 if (GET_CODE (op0) == NEG)
2599 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2600 /* If op1 is a MULT as well and simplify_unary_operation
2601 just moved the NEG to the second operand, simplify_gen_binary
2602 below could, through simplify_associative_operation, move
2603 the NEG around again and recurse endlessly. */
2604 if (temp
2605 && GET_CODE (op1) == MULT
2606 && GET_CODE (temp) == MULT
2607 && XEXP (op1, 0) == XEXP (temp, 0)
2608 && GET_CODE (XEXP (temp, 1)) == NEG
2609 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2610 temp = NULL_RTX;
2611 if (temp)
2612 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2614 if (GET_CODE (op1) == NEG)
2616 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2617 /* If op0 is a MULT as well and simplify_unary_operation
2618 just moved the NEG to the second operand, simplify_gen_binary
2619 below could, through simplify_associative_operation, move
2620 the NEG around again and recurse endlessly. */
2621 if (temp
2622 && GET_CODE (op0) == MULT
2623 && GET_CODE (temp) == MULT
2624 && XEXP (op0, 0) == XEXP (temp, 0)
2625 && GET_CODE (XEXP (temp, 1)) == NEG
2626 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2627 temp = NULL_RTX;
2628 if (temp)
2629 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2632 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2633 x is NaN, since x * 0 is then also NaN. Nor is it valid
2634 when the mode has signed zeros, since multiplying a negative
2635 number by 0 will give -0, not 0. */
2636 if (!HONOR_NANS (mode)
2637 && !HONOR_SIGNED_ZEROS (mode)
2638 && trueop1 == CONST0_RTX (mode)
2639 && ! side_effects_p (op0))
2640 return op1;
2642 /* In IEEE floating point, x*1 is not equivalent to x for
2643 signalling NaNs. */
2644 if (!HONOR_SNANS (mode)
2645 && trueop1 == CONST1_RTX (mode))
2646 return op0;
2648 /* Convert multiply by constant power of two into shift unless
2649 we are still generating RTL. This test is a kludge. */
2650 if (CONST_INT_P (trueop1)
2651 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2652 /* If the mode is larger than the host word size, and the
2653 uppermost bit is set, then this isn't a power of two due
2654 to implicit sign extension. */
2655 && (width <= HOST_BITS_PER_WIDE_INT
2656 || val != HOST_BITS_PER_WIDE_INT - 1))
2657 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
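/* For example, (mult x (const_int 8)) becomes
   (ashift x (const_int 3)), since exact_log2 (8) == 3.  */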
2659 /* Likewise for multipliers wider than a word. */
2660 if (CONST_DOUBLE_AS_INT_P (trueop1)
2661 && GET_MODE (op0) == mode
2662 && CONST_DOUBLE_LOW (trueop1) == 0
2663 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2664 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2665 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2666 return simplify_gen_binary (ASHIFT, mode, op0,
2667 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2669 /* x*2 is x+x and x*(-1) is -x */
2670 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2671 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2672 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2673 && GET_MODE (op0) == mode)
2675 REAL_VALUE_TYPE d;
2676 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2678 if (REAL_VALUES_EQUAL (d, dconst2))
2679 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2681 if (!HONOR_SNANS (mode)
2682 && REAL_VALUES_EQUAL (d, dconstm1))
2683 return simplify_gen_unary (NEG, mode, op0, mode);
2686 /* Optimize -x * -x as x * x. */
2687 if (FLOAT_MODE_P (mode)
2688 && GET_CODE (op0) == NEG
2689 && GET_CODE (op1) == NEG
2690 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2691 && !side_effects_p (XEXP (op0, 0)))
2692 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2694 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2695 if (SCALAR_FLOAT_MODE_P (mode)
2696 && GET_CODE (op0) == ABS
2697 && GET_CODE (op1) == ABS
2698 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2699 && !side_effects_p (XEXP (op0, 0)))
2700 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2702 /* Reassociate multiplication, but for floating point MULTs
2703 only when the user specifies unsafe math optimizations. */
2704 if (! FLOAT_MODE_P (mode)
2705 || flag_unsafe_math_optimizations)
2707 tem = simplify_associative_operation (code, mode, op0, op1);
2708 if (tem)
2709 return tem;
2711 break;
2713 case IOR:
2714 if (trueop1 == CONST0_RTX (mode))
2715 return op0;
2716 if (INTEGRAL_MODE_P (mode)
2717 && trueop1 == CONSTM1_RTX (mode)
2718 && !side_effects_p (op0))
2719 return op1;
2720 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2721 return op0;
2722 /* A | (~A) -> -1 */
2723 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2724 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2725 && ! side_effects_p (op0)
2726 && SCALAR_INT_MODE_P (mode))
2727 return constm1_rtx;
2729 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2730 if (CONST_INT_P (op1)
2731 && HWI_COMPUTABLE_MODE_P (mode)
2732 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2733 && !side_effects_p (op0))
2734 return op1;
2736 /* Canonicalize (X & C1) | C2. */
2737 if (GET_CODE (op0) == AND
2738 && CONST_INT_P (trueop1)
2739 && CONST_INT_P (XEXP (op0, 1)))
2741 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2742 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2743 HOST_WIDE_INT c2 = INTVAL (trueop1);
2745 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2746 if ((c1 & c2) == c1
2747 && !side_effects_p (XEXP (op0, 0)))
2748 return trueop1;
2750 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2751 if (((c1|c2) & mask) == mask)
2752 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2754 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2755 if (((c1 & ~c2) & mask) != (c1 & mask))
2757 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2758 gen_int_mode (c1 & ~c2, mode));
2759 return simplify_gen_binary (IOR, mode, tem, op1);
2763 /* Convert (A & B) | A to A. */
2764 if (GET_CODE (op0) == AND
2765 && (rtx_equal_p (XEXP (op0, 0), op1)
2766 || rtx_equal_p (XEXP (op0, 1), op1))
2767 && ! side_effects_p (XEXP (op0, 0))
2768 && ! side_effects_p (XEXP (op0, 1)))
2769 return op1;
2771 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2772 mode size to (rotate A CX). */
2774 if (GET_CODE (op1) == ASHIFT
2775 || GET_CODE (op1) == SUBREG)
2777 opleft = op1;
2778 opright = op0;
2780 else
2782 opright = op1;
2783 opleft = op0;
2786 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2787 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2788 && CONST_INT_P (XEXP (opleft, 1))
2789 && CONST_INT_P (XEXP (opright, 1))
2790 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2791 == GET_MODE_PRECISION (mode)))
2792 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
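/* Illustration (added example): in SImode,
   (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24))) has
   8 + 24 == 32 == the mode precision, so it is recognized as
   (rotate x (const_int 8)).  */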
2794 /* Same, but for ashift that has been "simplified" to a wider mode
2795 by simplify_shift_const. */
2797 if (GET_CODE (opleft) == SUBREG
2798 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2799 && GET_CODE (opright) == LSHIFTRT
2800 && GET_CODE (XEXP (opright, 0)) == SUBREG
2801 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2802 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2803 && (GET_MODE_SIZE (GET_MODE (opleft))
2804 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2805 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2806 SUBREG_REG (XEXP (opright, 0)))
2807 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2808 && CONST_INT_P (XEXP (opright, 1))
2809 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2810 == GET_MODE_PRECISION (mode)))
2811 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2812 XEXP (SUBREG_REG (opleft), 1));
2814 /* If we have (ior (and X C1) C2), simplify this by making
2815 C1 as small as possible if C1 actually changes. */
2816 if (CONST_INT_P (op1)
2817 && (HWI_COMPUTABLE_MODE_P (mode)
2818 || INTVAL (op1) > 0)
2819 && GET_CODE (op0) == AND
2820 && CONST_INT_P (XEXP (op0, 1))
2821 && CONST_INT_P (op1)
2822 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2824 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2825 gen_int_mode (UINTVAL (XEXP (op0, 1))
2826 & ~UINTVAL (op1),
2827 mode));
2828 return simplify_gen_binary (IOR, mode, tmp, op1);
2831 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2832 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2833 the PLUS does not affect any of the bits in OP1, we can do
2834 the IOR as a PLUS and we can associate. This is valid if OP1
2835 can be safely shifted left C bits. */
2836 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2837 && GET_CODE (XEXP (op0, 0)) == PLUS
2838 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2839 && CONST_INT_P (XEXP (op0, 1))
2840 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2842 int count = INTVAL (XEXP (op0, 1));
2843 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2845 if (mask >> count == INTVAL (trueop1)
2846 && trunc_int_for_mode (mask, mode) == mask
2847 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2848 return simplify_gen_binary (ASHIFTRT, mode,
2849 plus_constant (mode, XEXP (op0, 0),
2850 mask),
2851 XEXP (op0, 1));
2854 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2855 if (tem)
2856 return tem;
2858 tem = simplify_associative_operation (code, mode, op0, op1);
2859 if (tem)
2860 return tem;
2861 break;
2863 case XOR:
2864 if (trueop1 == CONST0_RTX (mode))
2865 return op0;
2866 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2867 return simplify_gen_unary (NOT, mode, op0, mode);
2868 if (rtx_equal_p (trueop0, trueop1)
2869 && ! side_effects_p (op0)
2870 && GET_MODE_CLASS (mode) != MODE_CC)
2871 return CONST0_RTX (mode);
2873 /* Canonicalize XOR of the most significant bit to PLUS. */
2874 if (CONST_SCALAR_INT_P (op1)
2875 && mode_signbit_p (mode, op1))
2876 return simplify_gen_binary (PLUS, mode, op0, op1);
2877 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2878 if (CONST_SCALAR_INT_P (op1)
2879 && GET_CODE (op0) == PLUS
2880 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2881 && mode_signbit_p (mode, XEXP (op0, 1)))
2882 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2883 simplify_gen_binary (XOR, mode, op1,
2884 XEXP (op0, 1)));
2886 /* If we are XORing two things that have no bits in common,
2887 convert them into an IOR. This helps to detect rotation encoded
2888 using those methods and possibly other simplifications. */
2890 if (HWI_COMPUTABLE_MODE_P (mode)
2891 && (nonzero_bits (op0, mode)
2892 & nonzero_bits (op1, mode)) == 0)
2893 return (simplify_gen_binary (IOR, mode, op0, op1));
2895 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2896 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2897 (NOT y). */
2899 int num_negated = 0;
2901 if (GET_CODE (op0) == NOT)
2902 num_negated++, op0 = XEXP (op0, 0);
2903 if (GET_CODE (op1) == NOT)
2904 num_negated++, op1 = XEXP (op1, 0);
2906 if (num_negated == 2)
2907 return simplify_gen_binary (XOR, mode, op0, op1);
2908 else if (num_negated == 1)
2909 return simplify_gen_unary (NOT, mode,
2910 simplify_gen_binary (XOR, mode, op0, op1),
2911 mode);
2914 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2915 correspond to a machine insn or result in further simplifications
2916 if B is a constant. */
2918 if (GET_CODE (op0) == AND
2919 && rtx_equal_p (XEXP (op0, 1), op1)
2920 && ! side_effects_p (op1))
2921 return simplify_gen_binary (AND, mode,
2922 simplify_gen_unary (NOT, mode,
2923 XEXP (op0, 0), mode),
2924 op1);
2926 else if (GET_CODE (op0) == AND
2927 && rtx_equal_p (XEXP (op0, 0), op1)
2928 && ! side_effects_p (op1))
2929 return simplify_gen_binary (AND, mode,
2930 simplify_gen_unary (NOT, mode,
2931 XEXP (op0, 1), mode),
2932 op1);
2934 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2935 we can transform like this:
2936 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2937 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2938 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2939 Attempt a few simplifications when B and C are both constants. */
2940 if (GET_CODE (op0) == AND
2941 && CONST_INT_P (op1)
2942 && CONST_INT_P (XEXP (op0, 1)))
2944 rtx a = XEXP (op0, 0);
2945 rtx b = XEXP (op0, 1);
2946 rtx c = op1;
2947 HOST_WIDE_INT bval = INTVAL (b);
2948 HOST_WIDE_INT cval = INTVAL (c);
2950 rtx na_c
2951 = simplify_binary_operation (AND, mode,
2952 simplify_gen_unary (NOT, mode, a, mode),
2953 c);
2954 if ((~cval & bval) == 0)
2956 /* Try to simplify ~A&C | ~B&C. */
2957 if (na_c != NULL_RTX)
2958 return simplify_gen_binary (IOR, mode, na_c,
2959 gen_int_mode (~bval & cval, mode));
2961 else
2963 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2964 if (na_c == const0_rtx)
2966 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2967 gen_int_mode (~cval & bval,
2968 mode));
2969 return simplify_gen_binary (IOR, mode, a_nc_b,
2970 gen_int_mode (~bval & cval,
2971 mode));
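/* Worked instance of the (~C & B) == 0 branch above (illustrative):
   with B = 0x0f and C = 0xff, (~C & B) == 0, so
   (xor (and A (const_int 0x0f)) (const_int 0xff)) simplifies to
   (ior (and (not A) (const_int 0xff)) (const_int 0xf0)),
   since ~B & C == 0xf0.  */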
2976 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2977 comparison if STORE_FLAG_VALUE is 1. */
2978 if (STORE_FLAG_VALUE == 1
2979 && trueop1 == const1_rtx
2980 && COMPARISON_P (op0)
2981 && (reversed = reversed_comparison (op0, mode)))
2982 return reversed;
2984 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2985 is (lt foo (const_int 0)), so we can perform the above
2986 simplification if STORE_FLAG_VALUE is 1. */
2988 if (STORE_FLAG_VALUE == 1
2989 && trueop1 == const1_rtx
2990 && GET_CODE (op0) == LSHIFTRT
2991 && CONST_INT_P (XEXP (op0, 1))
2992 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2993 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2995 /* (xor (comparison foo bar) (const_int sign-bit))
2996 when STORE_FLAG_VALUE is the sign bit. */
2997 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2998 && trueop1 == const_true_rtx
2999 && COMPARISON_P (op0)
3000 && (reversed = reversed_comparison (op0, mode)))
3001 return reversed;
3003 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3004 if (tem)
3005 return tem;
3007 tem = simplify_associative_operation (code, mode, op0, op1);
3008 if (tem)
3009 return tem;
3010 break;
3012 case AND:
3013 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3014 return trueop1;
3015 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3016 return op0;
3017 if (HWI_COMPUTABLE_MODE_P (mode))
3019 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3020 HOST_WIDE_INT nzop1;
3021 if (CONST_INT_P (trueop1))
3023 HOST_WIDE_INT val1 = INTVAL (trueop1);
3024 /* If we are turning off bits already known off in OP0, we need
3025 not do an AND. */
3026 if ((nzop0 & ~val1) == 0)
3027 return op0;
3029 nzop1 = nonzero_bits (trueop1, mode);
3030 /* If we are clearing all the nonzero bits, the result is zero. */
3031 if ((nzop1 & nzop0) == 0
3032 && !side_effects_p (op0) && !side_effects_p (op1))
3033 return CONST0_RTX (mode);
3035 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3036 && GET_MODE_CLASS (mode) != MODE_CC)
3037 return op0;
3038 /* A & (~A) -> 0 */
3039 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3040 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3041 && ! side_effects_p (op0)
3042 && GET_MODE_CLASS (mode) != MODE_CC)
3043 return CONST0_RTX (mode);
3045 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3046 there are no nonzero bits of C outside of X's mode. */
3047 if ((GET_CODE (op0) == SIGN_EXTEND
3048 || GET_CODE (op0) == ZERO_EXTEND)
3049 && CONST_INT_P (trueop1)
3050 && HWI_COMPUTABLE_MODE_P (mode)
3051 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3052 & UINTVAL (trueop1)) == 0)
3054 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3055 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3056 gen_int_mode (INTVAL (trueop1),
3057 imode));
3058 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3061 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3062 we might be able to further simplify the AND with X and potentially
3063 remove the truncation altogether. */
3064 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3066 rtx x = XEXP (op0, 0);
3067 enum machine_mode xmode = GET_MODE (x);
3068 tem = simplify_gen_binary (AND, xmode, x,
3069 gen_int_mode (INTVAL (trueop1), xmode));
3070 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3073 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3074 if (GET_CODE (op0) == IOR
3075 && CONST_INT_P (trueop1)
3076 && CONST_INT_P (XEXP (op0, 1)))
3078 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3079 return simplify_gen_binary (IOR, mode,
3080 simplify_gen_binary (AND, mode,
3081 XEXP (op0, 0), op1),
3082 gen_int_mode (tmp, mode));
3085 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3086 insn (and may simplify more). */
3087 if (GET_CODE (op0) == XOR
3088 && rtx_equal_p (XEXP (op0, 0), op1)
3089 && ! side_effects_p (op1))
3090 return simplify_gen_binary (AND, mode,
3091 simplify_gen_unary (NOT, mode,
3092 XEXP (op0, 1), mode),
3093 op1);
3095 if (GET_CODE (op0) == XOR
3096 && rtx_equal_p (XEXP (op0, 1), op1)
3097 && ! side_effects_p (op1))
3098 return simplify_gen_binary (AND, mode,
3099 simplify_gen_unary (NOT, mode,
3100 XEXP (op0, 0), mode),
3101 op1);
3103 /* Similarly for (~(A ^ B)) & A. */
3104 if (GET_CODE (op0) == NOT
3105 && GET_CODE (XEXP (op0, 0)) == XOR
3106 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3107 && ! side_effects_p (op1))
3108 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3110 if (GET_CODE (op0) == NOT
3111 && GET_CODE (XEXP (op0, 0)) == XOR
3112 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3113 && ! side_effects_p (op1))
3114 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3116 /* Convert (A | B) & A to A. */
3117 if (GET_CODE (op0) == IOR
3118 && (rtx_equal_p (XEXP (op0, 0), op1)
3119 || rtx_equal_p (XEXP (op0, 1), op1))
3120 && ! side_effects_p (XEXP (op0, 0))
3121 && ! side_effects_p (XEXP (op0, 1)))
3122 return op1;
3124 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3125 ((A & N) + B) & M -> (A + B) & M
3126 Similarly if (N & M) == 0,
3127 ((A | N) + B) & M -> (A + B) & M
3128 and for - instead of + and/or ^ instead of |.
3129 Also, if (N & M) == 0, then
3130 (A +- N) & M -> A & M. */
3131 if (CONST_INT_P (trueop1)
3132 && HWI_COMPUTABLE_MODE_P (mode)
3133 && ~UINTVAL (trueop1)
3134 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3135 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3137 rtx pmop[2];
3138 int which;
3140 pmop[0] = XEXP (op0, 0);
3141 pmop[1] = XEXP (op0, 1);
3143 if (CONST_INT_P (pmop[1])
3144 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3145 return simplify_gen_binary (AND, mode, pmop[0], op1);
3147 for (which = 0; which < 2; which++)
3149 tem = pmop[which];
3150 switch (GET_CODE (tem))
3152 case AND:
3153 if (CONST_INT_P (XEXP (tem, 1))
3154 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3155 == UINTVAL (trueop1))
3156 pmop[which] = XEXP (tem, 0);
3157 break;
3158 case IOR:
3159 case XOR:
3160 if (CONST_INT_P (XEXP (tem, 1))
3161 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3162 pmop[which] = XEXP (tem, 0);
3163 break;
3164 default:
3165 break;
3169 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3171 tem = simplify_gen_binary (GET_CODE (op0), mode,
3172 pmop[0], pmop[1]);
3173 return simplify_gen_binary (code, mode, tem, op1);
3177 /* (and X (ior (not X) Y)) -> (and X Y) */
3178 if (GET_CODE (op1) == IOR
3179 && GET_CODE (XEXP (op1, 0)) == NOT
3180 && op0 == XEXP (XEXP (op1, 0), 0))
3181 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3183 /* (and (ior (not X) Y) X) -> (and X Y) */
3184 if (GET_CODE (op0) == IOR
3185 && GET_CODE (XEXP (op0, 0)) == NOT
3186 && op1 == XEXP (XEXP (op0, 0), 0))
3187 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3189 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3190 if (tem)
3191 return tem;
3193 tem = simplify_associative_operation (code, mode, op0, op1);
3194 if (tem)
3195 return tem;
3196 break;
3198 case UDIV:
3199 /* 0/x is 0 (or x&0 if x has side-effects). */
3200 if (trueop0 == CONST0_RTX (mode))
3202 if (side_effects_p (op1))
3203 return simplify_gen_binary (AND, mode, op1, trueop0);
3204 return trueop0;
3206 /* x/1 is x. */
3207 if (trueop1 == CONST1_RTX (mode))
3209 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3210 if (tem)
3211 return tem;
3213 /* Convert divide by power of two into shift. */
3214 if (CONST_INT_P (trueop1)
3215 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3216 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3217 break;
3219 case DIV:
3220 /* Handle floating point and integers separately. */
3221 if (SCALAR_FLOAT_MODE_P (mode))
3223 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3224 safe for modes with NaNs, since 0.0 / 0.0 will then be
3225 NaN rather than 0.0. Nor is it safe for modes with signed
3226 zeros, since dividing 0 by a negative number gives -0.0 */
3227 if (trueop0 == CONST0_RTX (mode)
3228 && !HONOR_NANS (mode)
3229 && !HONOR_SIGNED_ZEROS (mode)
3230 && ! side_effects_p (op1))
3231 return op0;
3232 /* x/1.0 is x. */
3233 if (trueop1 == CONST1_RTX (mode)
3234 && !HONOR_SNANS (mode))
3235 return op0;
3237 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3238 && trueop1 != CONST0_RTX (mode))
3240 REAL_VALUE_TYPE d;
3241 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3243 /* x/-1.0 is -x. */
3244 if (REAL_VALUES_EQUAL (d, dconstm1)
3245 && !HONOR_SNANS (mode))
3246 return simplify_gen_unary (NEG, mode, op0, mode);
3248 /* Change FP division by a constant into multiplication.
3249 Only do this with -freciprocal-math. */
3250 if (flag_reciprocal_math
3251 && !REAL_VALUES_EQUAL (d, dconst0))
3253 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3254 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3255 return simplify_gen_binary (MULT, mode, op0, tem);
3259 else if (SCALAR_INT_MODE_P (mode))
3261 /* 0/x is 0 (or x&0 if x has side-effects). */
3262 if (trueop0 == CONST0_RTX (mode)
3263 && !cfun->can_throw_non_call_exceptions)
3265 if (side_effects_p (op1))
3266 return simplify_gen_binary (AND, mode, op1, trueop0);
3267 return trueop0;
3269 /* x/1 is x. */
3270 if (trueop1 == CONST1_RTX (mode))
3272 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3273 if (tem)
3274 return tem;
3276 /* x/-1 is -x. */
3277 if (trueop1 == constm1_rtx)
3279 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3280 if (x)
3281 return simplify_gen_unary (NEG, mode, x, mode);
3284 break;
3286 case UMOD:
3287 /* 0%x is 0 (or x&0 if x has side-effects). */
3288 if (trueop0 == CONST0_RTX (mode))
3290 if (side_effects_p (op1))
3291 return simplify_gen_binary (AND, mode, op1, trueop0);
3292 return trueop0;
3294 /* x%1 is 0 (or x&0 if x has side-effects). */
3295 if (trueop1 == CONST1_RTX (mode))
3297 if (side_effects_p (op0))
3298 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3299 return CONST0_RTX (mode);
3301 /* Implement modulus by power of two as AND. */
3302 if (CONST_INT_P (trueop1)
3303 && exact_log2 (UINTVAL (trueop1)) > 0)
3304 return simplify_gen_binary (AND, mode, op0,
3305 gen_int_mode (INTVAL (op1) - 1, mode));
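/* For example, (umod x (const_int 8)) becomes
   (and x (const_int 7)): an unsigned value modulo a power of two
   just keeps the low-order bits.  */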
3306 break;
3308 case MOD:
3309 /* 0%x is 0 (or x&0 if x has side-effects). */
3310 if (trueop0 == CONST0_RTX (mode))
3312 if (side_effects_p (op1))
3313 return simplify_gen_binary (AND, mode, op1, trueop0);
3314 return trueop0;
3316 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3317 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3319 if (side_effects_p (op0))
3320 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3321 return CONST0_RTX (mode);
3323 break;
3325 case ROTATERT:
3326 case ROTATE:
3327 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3328 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3329 bitsize - 1, use the other direction of rotate with a
3330 1 .. bitsize / 2 - 1 amount instead. */
3331 if (CONST_INT_P (trueop1)
3332 && IN_RANGE (INTVAL (trueop1),
3333 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3334 GET_MODE_BITSIZE (mode) - 1))
3335 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3336 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3337 - INTVAL (trueop1)));
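/* For example, in SImode (rotate x (const_int 24)) is canonicalized
   above to (rotatert x (const_int 8)), keeping the smaller rotate
   count.  */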
3338 /* FALLTHRU */
3339 case ASHIFTRT:
3340 if (trueop1 == CONST0_RTX (mode))
3341 return op0;
3342 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3343 return op0;
3344 /* Rotating ~0 always results in ~0. */
3345 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3346 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3347 && ! side_effects_p (op1))
3348 return op0;
3349 canonicalize_shift:
3350 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3352 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3353 if (val != INTVAL (op1))
3354 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3356 break;
3358 case ASHIFT:
3359 case SS_ASHIFT:
3360 case US_ASHIFT:
3361 if (trueop1 == CONST0_RTX (mode))
3362 return op0;
3363 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3364 return op0;
3365 goto canonicalize_shift;
3367 case LSHIFTRT:
3368 if (trueop1 == CONST0_RTX (mode))
3369 return op0;
3370 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3371 return op0;
3372 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3373 if (GET_CODE (op0) == CLZ
3374 && CONST_INT_P (trueop1)
3375 && STORE_FLAG_VALUE == 1
3376 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3378 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3379 unsigned HOST_WIDE_INT zero_val = 0;
3381 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3382 && zero_val == GET_MODE_PRECISION (imode)
3383 && INTVAL (trueop1) == exact_log2 (zero_val))
3384 return simplify_gen_relational (EQ, mode, imode,
3385 XEXP (op0, 0), const0_rtx);
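/* Illustration of the CLZ simplification above (added example): in
   SImode, where CLZ_DEFINED_VALUE_AT_ZERO yields 32, only x == 0
   makes (clz x) == 32 and thus sets bit 5, so
   (lshiftrt (clz x) (const_int 5)) is exactly (eq x (const_int 0)).  */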
3387 goto canonicalize_shift;
3389 case SMIN:
3390 if (width <= HOST_BITS_PER_WIDE_INT
3391 && mode_signbit_p (mode, trueop1)
3392 && ! side_effects_p (op0))
3393 return op1;
3394 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3395 return op0;
3396 tem = simplify_associative_operation (code, mode, op0, op1);
3397 if (tem)
3398 return tem;
3399 break;
3401 case SMAX:
3402 if (width <= HOST_BITS_PER_WIDE_INT
3403 && CONST_INT_P (trueop1)
3404 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3405 && ! side_effects_p (op0))
3406 return op1;
3407 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3408 return op0;
3409 tem = simplify_associative_operation (code, mode, op0, op1);
3410 if (tem)
3411 return tem;
3412 break;
3414 case UMIN:
3415 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3416 return op1;
3417 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3418 return op0;
3419 tem = simplify_associative_operation (code, mode, op0, op1);
3420 if (tem)
3421 return tem;
3422 break;
3424 case UMAX:
3425 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3426 return op1;
3427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3428 return op0;
3429 tem = simplify_associative_operation (code, mode, op0, op1);
3430 if (tem)
3431 return tem;
3432 break;
3434 case SS_PLUS:
3435 case US_PLUS:
3436 case SS_MINUS:
3437 case US_MINUS:
3438 case SS_MULT:
3439 case US_MULT:
3440 case SS_DIV:
3441 case US_DIV:
3442 /* ??? There are simplifications that can be done. */
3443 return 0;
3445 case VEC_SELECT:
3446 if (!VECTOR_MODE_P (mode))
3448 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3449 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3450 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3451 gcc_assert (XVECLEN (trueop1, 0) == 1);
3452 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3454 if (GET_CODE (trueop0) == CONST_VECTOR)
3455 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3456 (trueop1, 0, 0)));
3458 /* Extract a scalar element from a nested VEC_SELECT expression
3459 (with optional nested VEC_CONCAT expression). Some targets
3460 (i386) extract a scalar element from a vector using a chain of
3461 nested VEC_SELECT expressions. When the input operand is a memory
3462 operand, this operation can be simplified to a simple scalar
3463 load from an offset memory address. */
3464 if (GET_CODE (trueop0) == VEC_SELECT)
3466 rtx op0 = XEXP (trueop0, 0);
3467 rtx op1 = XEXP (trueop0, 1);
3469 enum machine_mode opmode = GET_MODE (op0);
3470 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3471 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3473 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3474 int elem;
3476 rtvec vec;
3477 rtx tmp_op, tmp;
3479 gcc_assert (GET_CODE (op1) == PARALLEL);
3480 gcc_assert (i < n_elts);
3483 /* Select the element pointed to by the nested selector. */
3483 elem = INTVAL (XVECEXP (op1, 0, i));
3485 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3486 if (GET_CODE (op0) == VEC_CONCAT)
3488 rtx op00 = XEXP (op0, 0);
3489 rtx op01 = XEXP (op0, 1);
3491 enum machine_mode mode00, mode01;
3492 int n_elts00, n_elts01;
3494 mode00 = GET_MODE (op00);
3495 mode01 = GET_MODE (op01);
3497 /* Find out number of elements of each operand. */
3498 if (VECTOR_MODE_P (mode00))
3500 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3501 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3503 else
3504 n_elts00 = 1;
3506 if (VECTOR_MODE_P (mode01))
3508 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3509 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3511 else
3512 n_elts01 = 1;
3514 gcc_assert (n_elts == n_elts00 + n_elts01);
3516 /* Select correct operand of VEC_CONCAT
3517 and adjust selector. */
3518 if (elem < n_elts00)
3519 tmp_op = op00;
3520 else
3522 tmp_op = op01;
3523 elem -= n_elts00;
3526 else
3527 tmp_op = op0;
3529 vec = rtvec_alloc (1);
3530 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3532 tmp = gen_rtx_fmt_ee (code, mode,
3533 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3534 return tmp;
3536 if (GET_CODE (trueop0) == VEC_DUPLICATE
3537 && GET_MODE (XEXP (trueop0, 0)) == mode)
3538 return XEXP (trueop0, 0);
3540 else
3542 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3543 gcc_assert (GET_MODE_INNER (mode)
3544 == GET_MODE_INNER (GET_MODE (trueop0)));
3545 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3547 if (GET_CODE (trueop0) == CONST_VECTOR)
3549 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3550 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3551 rtvec v = rtvec_alloc (n_elts);
3552 unsigned int i;
3554 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3555 for (i = 0; i < n_elts; i++)
3557 rtx x = XVECEXP (trueop1, 0, i);
3559 gcc_assert (CONST_INT_P (x));
3560 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3561 INTVAL (x));
3564 return gen_rtx_CONST_VECTOR (mode, v);
3567 /* Recognize the identity. */
3568 if (GET_MODE (trueop0) == mode)
3570 bool maybe_ident = true;
3571 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3573 rtx j = XVECEXP (trueop1, 0, i);
3574 if (!CONST_INT_P (j) || INTVAL (j) != i)
3576 maybe_ident = false;
3577 break;
3580 if (maybe_ident)
3581 return trueop0;
3584 /* If we build {a,b} then permute it, build the result directly. */
3585 if (XVECLEN (trueop1, 0) == 2
3586 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3587 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3588 && GET_CODE (trueop0) == VEC_CONCAT
3589 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3590 && GET_MODE (XEXP (trueop0, 0)) == mode
3591 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3592 && GET_MODE (XEXP (trueop0, 1)) == mode)
3594 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3595 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3596 rtx subop0, subop1;
3598 gcc_assert (i0 < 4 && i1 < 4);
3599 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3600 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3602 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3605 if (XVECLEN (trueop1, 0) == 2
3606 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3607 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3608 && GET_CODE (trueop0) == VEC_CONCAT
3609 && GET_MODE (trueop0) == mode)
3611 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3612 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3613 rtx subop0, subop1;
3615 gcc_assert (i0 < 2 && i1 < 2);
3616 subop0 = XEXP (trueop0, i0);
3617 subop1 = XEXP (trueop0, i1);
3619 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
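/* For example, (vec_select:V2SI (vec_concat:V2SI a b)
   (parallel [(const_int 1) (const_int 0)])) is rebuilt directly as
   (vec_concat:V2SI b a).  */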
3623 if (XVECLEN (trueop1, 0) == 1
3624 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3625 && GET_CODE (trueop0) == VEC_CONCAT)
3627 rtx vec = trueop0;
3628 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3630 /* Try to find the element in the VEC_CONCAT. */
3631 while (GET_MODE (vec) != mode
3632 && GET_CODE (vec) == VEC_CONCAT)
3634 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3635 if (offset < vec_size)
3636 vec = XEXP (vec, 0);
3637 else
3639 offset -= vec_size;
3640 vec = XEXP (vec, 1);
3642 vec = avoid_constant_pool_reference (vec);
3645 if (GET_MODE (vec) == mode)
3646 return vec;
3649 /* If we select elements in a vec_merge that all come from the same
3650 operand, select from that operand directly. */
3651 if (GET_CODE (op0) == VEC_MERGE)
3653 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3654 if (CONST_INT_P (trueop02))
3656 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3657 bool all_operand0 = true;
3658 bool all_operand1 = true;
3659 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3661 rtx j = XVECEXP (trueop1, 0, i);
3662 if (sel & ((unsigned HOST_WIDE_INT) 1 << UINTVAL (j)))
3663 all_operand1 = false;
3664 else
3665 all_operand0 = false;
3667 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3668 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3669 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3670 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3674 return 0;
3675 case VEC_CONCAT:
3677 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3678 ? GET_MODE (trueop0)
3679 : GET_MODE_INNER (mode));
3680 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3681 ? GET_MODE (trueop1)
3682 : GET_MODE_INNER (mode));
3684 gcc_assert (VECTOR_MODE_P (mode));
3685 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3686 == GET_MODE_SIZE (mode));
3688 if (VECTOR_MODE_P (op0_mode))
3689 gcc_assert (GET_MODE_INNER (mode)
3690 == GET_MODE_INNER (op0_mode));
3691 else
3692 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3694 if (VECTOR_MODE_P (op1_mode))
3695 gcc_assert (GET_MODE_INNER (mode)
3696 == GET_MODE_INNER (op1_mode));
3697 else
3698 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3700 if ((GET_CODE (trueop0) == CONST_VECTOR
3701 || CONST_SCALAR_INT_P (trueop0)
3702 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3703 && (GET_CODE (trueop1) == CONST_VECTOR
3704 || CONST_SCALAR_INT_P (trueop1)
3705 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3707 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3708 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3709 rtvec v = rtvec_alloc (n_elts);
3710 unsigned int i;
3711 unsigned in_n_elts = 1;
3713 if (VECTOR_MODE_P (op0_mode))
3714 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3715 for (i = 0; i < n_elts; i++)
3717 if (i < in_n_elts)
3719 if (!VECTOR_MODE_P (op0_mode))
3720 RTVEC_ELT (v, i) = trueop0;
3721 else
3722 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3724 else
3726 if (!VECTOR_MODE_P (op1_mode))
3727 RTVEC_ELT (v, i) = trueop1;
3728 else
3729 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3730 i - in_n_elts);
3734 return gen_rtx_CONST_VECTOR (mode, v);
3737 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3738 Restrict the transformation to avoid generating a VEC_SELECT with a
3739 mode unrelated to its operand. */
3740 if (GET_CODE (trueop0) == VEC_SELECT
3741 && GET_CODE (trueop1) == VEC_SELECT
3742 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3743 && GET_MODE (XEXP (trueop0, 0)) == mode)
3745 rtx par0 = XEXP (trueop0, 1);
3746 rtx par1 = XEXP (trueop1, 1);
3747 int len0 = XVECLEN (par0, 0);
3748 int len1 = XVECLEN (par1, 0);
3749 rtvec vec = rtvec_alloc (len0 + len1);
3750 for (int i = 0; i < len0; i++)
3751 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3752 for (int i = 0; i < len1; i++)
3753 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3754 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3755 gen_rtx_PARALLEL (VOIDmode, vec));
3758 return 0;
3760 default:
3761 gcc_unreachable ();
3764 return 0;
3768 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3769 rtx op0, rtx op1)
3771 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3772 HOST_WIDE_INT val;
3773 unsigned int width = GET_MODE_PRECISION (mode);
3775 if (VECTOR_MODE_P (mode)
3776 && code != VEC_CONCAT
3777 && GET_CODE (op0) == CONST_VECTOR
3778 && GET_CODE (op1) == CONST_VECTOR)
3780 unsigned n_elts = GET_MODE_NUNITS (mode);
3781 enum machine_mode op0mode = GET_MODE (op0);
3782 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3783 enum machine_mode op1mode = GET_MODE (op1);
3784 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3785 rtvec v = rtvec_alloc (n_elts);
3786 unsigned int i;
3788 gcc_assert (op0_n_elts == n_elts);
3789 gcc_assert (op1_n_elts == n_elts);
3790 for (i = 0; i < n_elts; i++)
3792 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3793 CONST_VECTOR_ELT (op0, i),
3794 CONST_VECTOR_ELT (op1, i));
3795 if (!x)
3796 return 0;
3797 RTVEC_ELT (v, i) = x;
3800 return gen_rtx_CONST_VECTOR (mode, v);
3803 if (VECTOR_MODE_P (mode)
3804 && code == VEC_CONCAT
3805 && (CONST_SCALAR_INT_P (op0)
3806 || GET_CODE (op0) == CONST_FIXED
3807 || CONST_DOUBLE_AS_FLOAT_P (op0))
3808 && (CONST_SCALAR_INT_P (op1)
3809 || CONST_DOUBLE_AS_FLOAT_P (op1)
3810 || GET_CODE (op1) == CONST_FIXED))
3812 unsigned n_elts = GET_MODE_NUNITS (mode);
3813 rtvec v = rtvec_alloc (n_elts);
3815 gcc_assert (n_elts >= 2);
3816 if (n_elts == 2)
3818 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3819 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3821 RTVEC_ELT (v, 0) = op0;
3822 RTVEC_ELT (v, 1) = op1;
3824 else
3826 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3827 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3828 unsigned i;
3830 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3831 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3832 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3834 for (i = 0; i < op0_n_elts; ++i)
3835 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3836 for (i = 0; i < op1_n_elts; ++i)
3837 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3840 return gen_rtx_CONST_VECTOR (mode, v);
3843 if (SCALAR_FLOAT_MODE_P (mode)
3844 && CONST_DOUBLE_AS_FLOAT_P (op0)
3845 && CONST_DOUBLE_AS_FLOAT_P (op1)
3846 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3848 if (code == AND
3849 || code == IOR
3850 || code == XOR)
3852 long tmp0[4];
3853 long tmp1[4];
3854 REAL_VALUE_TYPE r;
3855 int i;
3857 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3858 GET_MODE (op0));
3859 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3860 GET_MODE (op1));
3861 for (i = 0; i < 4; i++)
3863 switch (code)
3865 case AND:
3866 tmp0[i] &= tmp1[i];
3867 break;
3868 case IOR:
3869 tmp0[i] |= tmp1[i];
3870 break;
3871 case XOR:
3872 tmp0[i] ^= tmp1[i];
3873 break;
3874 default:
3875 gcc_unreachable ();
3878 real_from_target (&r, tmp0, mode);
3879 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3881 else
3883 REAL_VALUE_TYPE f0, f1, value, result;
3884 bool inexact;
3886 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3887 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3888 real_convert (&f0, mode, &f0);
3889 real_convert (&f1, mode, &f1);
3891 if (HONOR_SNANS (mode)
3892 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3893 return 0;
3895 if (code == DIV
3896 && REAL_VALUES_EQUAL (f1, dconst0)
3897 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3898 return 0;
3900 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3901 && flag_trapping_math
3902 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3904 int s0 = REAL_VALUE_NEGATIVE (f0);
3905 int s1 = REAL_VALUE_NEGATIVE (f1);
3907 switch (code)
3909 case PLUS:
3910 /* Inf + -Inf = NaN plus exception. */
3911 if (s0 != s1)
3912 return 0;
3913 break;
3914 case MINUS:
3915 /* Inf - Inf = NaN plus exception. */
3916 if (s0 == s1)
3917 return 0;
3918 break;
3919 case DIV:
3920 /* Inf / Inf = NaN plus exception. */
3921 return 0;
3922 default:
3923 break;
3927 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3928 && flag_trapping_math
3929 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3930 || (REAL_VALUE_ISINF (f1)
3931 && REAL_VALUES_EQUAL (f0, dconst0))))
3932 /* Inf * 0 = NaN plus exception. */
3933 return 0;
3935 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3936 &f0, &f1);
3937 real_convert (&result, mode, &value);
3939 /* Don't constant fold this floating point operation if
3940 the result has overflowed and flag_trapping_math is set. */
3942 if (flag_trapping_math
3943 && MODE_HAS_INFINITIES (mode)
3944 && REAL_VALUE_ISINF (result)
3945 && !REAL_VALUE_ISINF (f0)
3946 && !REAL_VALUE_ISINF (f1))
3947 /* Overflow plus exception. */
3948 return 0;
3950 /* Don't constant fold this floating point operation if the
3951 result may depend upon the run-time rounding mode and
3952 flag_rounding_math is set, or if GCC's software emulation
3953 is unable to accurately represent the result. */
3955 if ((flag_rounding_math
3956 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3957 && (inexact || !real_identical (&result, &value)))
3958 return NULL_RTX;
3960 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3964 /* We can fold some multi-word operations. */
3965 if (GET_MODE_CLASS (mode) == MODE_INT
3966 && width == HOST_BITS_PER_DOUBLE_INT
3967 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3968 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3970 double_int o0, o1, res, tmp;
3971 bool overflow;
3973 o0 = rtx_to_double_int (op0);
3974 o1 = rtx_to_double_int (op1);
3976 switch (code)
3978 case MINUS:
3979 /* A - B == A + (-B). */
3980 o1 = -o1;
3982 /* Fall through. */
3984 case PLUS:
3985 res = o0 + o1;
3986 break;
3988 case MULT:
3989 res = o0 * o1;
3990 break;
3992 case DIV:
3993 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3994 &tmp, &overflow);
3995 if (overflow)
3996 return 0;
3997 break;
3999 case MOD:
4000 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
4001 &res, &overflow);
4002 if (overflow)
4003 return 0;
4004 break;
4006 case UDIV:
4007 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4008 &tmp, &overflow);
4009 if (overflow)
4010 return 0;
4011 break;
4013 case UMOD:
4014 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4015 &res, &overflow);
4016 if (overflow)
4017 return 0;
4018 break;
4020 case AND:
4021 res = o0 & o1;
4022 break;
4024 case IOR:
4025 res = o0 | o1;
4026 break;
4028 case XOR:
4029 res = o0 ^ o1;
4030 break;
4032 case SMIN:
4033 res = o0.smin (o1);
4034 break;
4036 case SMAX:
4037 res = o0.smax (o1);
4038 break;
4040 case UMIN:
4041 res = o0.umin (o1);
4042 break;
4044 case UMAX:
4045 res = o0.umax (o1);
4046 break;
4048 case LSHIFTRT: case ASHIFTRT:
4049 case ASHIFT:
4050 case ROTATE: case ROTATERT:
4052 unsigned HOST_WIDE_INT cnt;
4054 if (SHIFT_COUNT_TRUNCATED)
4056 o1.high = 0;
4057 o1.low &= GET_MODE_PRECISION (mode) - 1;
4060 if (!o1.fits_uhwi ()
4061 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4062 return 0;
4064 cnt = o1.to_uhwi ();
4065 unsigned short prec = GET_MODE_PRECISION (mode);
4067 if (code == LSHIFTRT || code == ASHIFTRT)
4068 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4069 else if (code == ASHIFT)
4070 res = o0.alshift (cnt, prec);
4071 else if (code == ROTATE)
4072 res = o0.lrotate (cnt, prec);
4073 else /* code == ROTATERT */
4074 res = o0.rrotate (cnt, prec);
4076 break;
4078 default:
4079 return 0;
4082 return immed_double_int_const (res, mode);
4085 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4086 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4088 /* Get the integer argument values in two forms:
4089 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
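/* E.g. (illustrative) in QImode the byte 0xff gives ARG0 = 255 but
   ARG0S = -1; unsigned codes such as UDIV and UMOD use ARG0/ARG1,
   while signed codes such as DIV and MOD use ARG0S/ARG1S. */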
4091 arg0 = INTVAL (op0);
4092 arg1 = INTVAL (op1);
4094 if (width < HOST_BITS_PER_WIDE_INT)
4096 arg0 &= GET_MODE_MASK (mode);
4097 arg1 &= GET_MODE_MASK (mode);
4099 arg0s = arg0;
4100 if (val_signbit_known_set_p (mode, arg0s))
4101 arg0s |= ~GET_MODE_MASK (mode);
4103 arg1s = arg1;
4104 if (val_signbit_known_set_p (mode, arg1s))
4105 arg1s |= ~GET_MODE_MASK (mode);
4107 else
4109 arg0s = arg0;
4110 arg1s = arg1;
4113 /* Compute the value of the arithmetic. */
4115 switch (code)
4117 case PLUS:
4118 val = arg0s + arg1s;
4119 break;
4121 case MINUS:
4122 val = arg0s - arg1s;
4123 break;
4125 case MULT:
4126 val = arg0s * arg1s;
4127 break;
4129 case DIV:
4130 if (arg1s == 0
4131 || ((unsigned HOST_WIDE_INT) arg0s
4132 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4133 && arg1s == -1))
4134 return 0;
4135 val = arg0s / arg1s;
4136 break;
4138 case MOD:
4139 if (arg1s == 0
4140 || ((unsigned HOST_WIDE_INT) arg0s
4141 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4142 && arg1s == -1))
4143 return 0;
4144 val = arg0s % arg1s;
4145 break;
4147 case UDIV:
4148 if (arg1 == 0
4149 || ((unsigned HOST_WIDE_INT) arg0s
4150 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4151 && arg1s == -1))
4152 return 0;
4153 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4154 break;
4156 case UMOD:
4157 if (arg1 == 0
4158 || ((unsigned HOST_WIDE_INT) arg0s
4159 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4160 && arg1s == -1))
4161 return 0;
4162 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4163 break;
4165 case AND:
4166 val = arg0 & arg1;
4167 break;
4169 case IOR:
4170 val = arg0 | arg1;
4171 break;
4173 case XOR:
4174 val = arg0 ^ arg1;
4175 break;
4177 case LSHIFTRT:
4178 case ASHIFT:
4179 case ASHIFTRT:
4180 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4181 the value is in range. We can't return any old value for
4182 out-of-range arguments because either the middle-end (via
4183 shift_truncation_mask) or the back-end might be relying on
4184 target-specific knowledge. Nor can we rely on
4185 shift_truncation_mask, since the shift might not be part of an
4186 ashlM3, lshrM3 or ashrM3 instruction. */
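/* E.g. (illustrative) for a 32-bit mode, a shift count of 33 is
   reduced to 1 when SHIFT_COUNT_TRUNCATED and otherwise makes the
   fold fail. */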
4187 if (SHIFT_COUNT_TRUNCATED)
4188 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4189 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4190 return 0;
4192 val = (code == ASHIFT
4193 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4194 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4196 /* Sign-extend the result for arithmetic right shifts. */
4197 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4198 val |= HOST_WIDE_INT_M1U << (width - arg1);
4199 break;
4201 case ROTATERT:
4202 if (arg1 < 0)
4203 return 0;
4205 arg1 %= width;
4206 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4207 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4208 break;
4210 case ROTATE:
4211 if (arg1 < 0)
4212 return 0;
4214 arg1 %= width;
4215 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4216 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4217 break;
4219 case COMPARE:
4220 /* Do nothing here. */
4221 return 0;
4223 case SMIN:
4224 val = arg0s <= arg1s ? arg0s : arg1s;
4225 break;
4227 case UMIN:
4228 val = ((unsigned HOST_WIDE_INT) arg0
4229 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4230 break;
4232 case SMAX:
4233 val = arg0s > arg1s ? arg0s : arg1s;
4234 break;
4236 case UMAX:
4237 val = ((unsigned HOST_WIDE_INT) arg0
4238 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4239 break;
4241 case SS_PLUS:
4242 case US_PLUS:
4243 case SS_MINUS:
4244 case US_MINUS:
4245 case SS_MULT:
4246 case US_MULT:
4247 case SS_DIV:
4248 case US_DIV:
4249 case SS_ASHIFT:
4250 case US_ASHIFT:
4251 /* ??? There are simplifications that can be done. */
4252 return 0;
4254 default:
4255 gcc_unreachable ();
4258 return gen_int_mode (val, mode);
4261 return NULL_RTX;
4266 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4267 PLUS or MINUS.
4269 Rather than testing for specific cases, we do this by a brute-force
4270 method and apply all possible simplifications until no more changes
4271 occur. Then we rebuild the operation. */
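/* For instance (an illustrative example with made-up operands):
   (minus (plus x (const_int 3)) (minus x (const_int 5))) decomposes
   into the operand list {+x, +3, -x, +5}; the x terms cancel in the
   pairwise pass and the constants fold, so the whole expression
   rebuilds to (const_int 8). */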
4273 struct simplify_plus_minus_op_data
4275 rtx op;
4276 short neg;
4279 static bool
4280 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4282 int result;
4284 result = (commutative_operand_precedence (y)
4285 - commutative_operand_precedence (x));
4286 if (result)
4287 return result > 0;
4289 /* Group together equal REGs to do more simplification. */
4290 if (REG_P (x) && REG_P (y))
4291 return REGNO (x) > REGNO (y);
4292 else
4293 return false;
4296 static rtx
4297 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4298 rtx op1)
4300 struct simplify_plus_minus_op_data ops[8];
4301 rtx result, tem;
4302 int n_ops = 2, input_ops = 2;
4303 int changed, n_constants = 0, canonicalized = 0;
4304 int i, j;
4306 memset (ops, 0, sizeof ops);
4308 /* Set up the two operands and then expand them until nothing has been
4309 changed. If we run out of room in our array, give up; this should
4310 almost never happen. */
4312 ops[0].op = op0;
4313 ops[0].neg = 0;
4314 ops[1].op = op1;
4315 ops[1].neg = (code == MINUS);
4319 changed = 0;
4321 for (i = 0; i < n_ops; i++)
4323 rtx this_op = ops[i].op;
4324 int this_neg = ops[i].neg;
4325 enum rtx_code this_code = GET_CODE (this_op);
4327 switch (this_code)
4329 case PLUS:
4330 case MINUS:
4331 if (n_ops == 7)
4332 return NULL_RTX;
4334 ops[n_ops].op = XEXP (this_op, 1);
4335 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4336 n_ops++;
4338 ops[i].op = XEXP (this_op, 0);
4339 input_ops++;
4340 changed = 1;
4341 canonicalized |= this_neg;
4342 break;
4344 case NEG:
4345 ops[i].op = XEXP (this_op, 0);
4346 ops[i].neg = ! this_neg;
4347 changed = 1;
4348 canonicalized = 1;
4349 break;
4351 case CONST:
4352 if (n_ops < 7
4353 && GET_CODE (XEXP (this_op, 0)) == PLUS
4354 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4355 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4357 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4358 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4359 ops[n_ops].neg = this_neg;
4360 n_ops++;
4361 changed = 1;
4362 canonicalized = 1;
4364 break;
4366 case NOT:
4367 /* ~a -> (-a - 1) */
4368 if (n_ops != 7)
4370 ops[n_ops].op = CONSTM1_RTX (mode);
4371 ops[n_ops++].neg = this_neg;
4372 ops[i].op = XEXP (this_op, 0);
4373 ops[i].neg = !this_neg;
4374 changed = 1;
4375 canonicalized = 1;
4377 break;
4379 case CONST_INT:
4380 n_constants++;
4381 if (this_neg)
4383 ops[i].op = neg_const_int (mode, this_op);
4384 ops[i].neg = 0;
4385 changed = 1;
4386 canonicalized = 1;
4388 break;
4390 default:
4391 break;
4395 while (changed);
4397 if (n_constants > 1)
4398 canonicalized = 1;
4400 gcc_assert (n_ops >= 2);
4402 /* If we only have two operands, we can avoid the loops. */
4403 if (n_ops == 2)
4405 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4406 rtx lhs, rhs;
4408 /* Get the two operands. Be careful with the order, especially for
4409 the cases where code == MINUS. */
4410 if (ops[0].neg && ops[1].neg)
4412 lhs = gen_rtx_NEG (mode, ops[0].op);
4413 rhs = ops[1].op;
4415 else if (ops[0].neg)
4417 lhs = ops[1].op;
4418 rhs = ops[0].op;
4420 else
4422 lhs = ops[0].op;
4423 rhs = ops[1].op;
4426 return simplify_const_binary_operation (code, mode, lhs, rhs);
4429 /* Now simplify each pair of operands until nothing changes. */
4432 /* Insertion sort is good enough for an eight-element array. */
4433 for (i = 1; i < n_ops; i++)
4435 struct simplify_plus_minus_op_data save;
4436 j = i - 1;
4437 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4438 continue;
4440 canonicalized = 1;
4441 save = ops[i];
4443 ops[j + 1] = ops[j];
4444 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4445 ops[j + 1] = save;
4448 changed = 0;
4449 for (i = n_ops - 1; i > 0; i--)
4450 for (j = i - 1; j >= 0; j--)
4452 rtx lhs = ops[j].op, rhs = ops[i].op;
4453 int lneg = ops[j].neg, rneg = ops[i].neg;
4455 if (lhs != 0 && rhs != 0)
4457 enum rtx_code ncode = PLUS;
4459 if (lneg != rneg)
4461 ncode = MINUS;
4462 if (lneg)
4463 tem = lhs, lhs = rhs, rhs = tem;
4465 else if (swap_commutative_operands_p (lhs, rhs))
4466 tem = lhs, lhs = rhs, rhs = tem;
4468 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4469 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4471 rtx tem_lhs, tem_rhs;
4473 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4474 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4475 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4477 if (tem && !CONSTANT_P (tem))
4478 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4480 else
4481 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4483 /* Reject "simplifications" that just wrap the two
4484 arguments in a CONST. Failure to do so can result
4485 in infinite recursion with simplify_binary_operation
4486 when it calls us to simplify CONST operations. */
4487 if (tem
4488 && ! (GET_CODE (tem) == CONST
4489 && GET_CODE (XEXP (tem, 0)) == ncode
4490 && XEXP (XEXP (tem, 0), 0) == lhs
4491 && XEXP (XEXP (tem, 0), 1) == rhs))
4493 lneg &= rneg;
4494 if (GET_CODE (tem) == NEG)
4495 tem = XEXP (tem, 0), lneg = !lneg;
4496 if (CONST_INT_P (tem) && lneg)
4497 tem = neg_const_int (mode, tem), lneg = 0;
4499 ops[i].op = tem;
4500 ops[i].neg = lneg;
4501 ops[j].op = NULL_RTX;
4502 changed = 1;
4503 canonicalized = 1;
4508 /* If nothing changed, fail. */
4509 if (!canonicalized)
4510 return NULL_RTX;
4512 /* Pack all the operands to the lower-numbered entries. */
4513 for (i = 0, j = 0; j < n_ops; j++)
4514 if (ops[j].op)
4516 ops[i] = ops[j];
4517 i++;
4519 n_ops = i;
4521 while (changed);
4523 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4524 if (n_ops == 2
4525 && CONST_INT_P (ops[1].op)
4526 && CONSTANT_P (ops[0].op)
4527 && ops[0].neg)
4528 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4530 /* We suppressed creation of trivial CONST expressions in the
4531 combination loop to avoid recursion. Create one manually now.
4532 The combination loop should have ensured that there is exactly
4533 one CONST_INT, and the sort will have ensured that it is last
4534 in the array and that any other constant will be next-to-last. */
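/* For instance (illustrative), the remaining operands
   {(symbol_ref s), (const_int 4)} combine via plus_constant into
   (const (plus (symbol_ref s) (const_int 4))), dropping the separate
   CONST_INT entry. */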
4536 if (n_ops > 1
4537 && CONST_INT_P (ops[n_ops - 1].op)
4538 && CONSTANT_P (ops[n_ops - 2].op))
4540 rtx value = ops[n_ops - 1].op;
4541 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4542 value = neg_const_int (mode, value);
4543 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4544 INTVAL (value));
4545 n_ops--;
4548 /* Put a non-negated operand first, if possible. */
4550 for (i = 0; i < n_ops && ops[i].neg; i++)
4551 continue;
4552 if (i == n_ops)
4553 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4554 else if (i != 0)
4556 tem = ops[0].op;
4557 ops[0] = ops[i];
4558 ops[i].op = tem;
4559 ops[i].neg = 1;
4562 /* Now make the result by performing the requested operations. */
4563 result = ops[0].op;
4564 for (i = 1; i < n_ops; i++)
4565 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4566 mode, result, ops[i].op);
4568 return result;
4571 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4572 static bool
4573 plus_minus_operand_p (const_rtx x)
4575 return GET_CODE (x) == PLUS
4576 || GET_CODE (x) == MINUS
4577 || (GET_CODE (x) == CONST
4578 && GET_CODE (XEXP (x, 0)) == PLUS
4579 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4580 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4583 /* Like simplify_binary_operation except used for relational operators.
4584 MODE is the mode of the result. If MODE is VOIDmode, the operands
4585 must not both be VOIDmode.
4587 CMP_MODE specifies the mode in which the comparison is done, so it is
4588 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4589 the operands or, if both are VOIDmode, the operands are compared in
4590 "infinite precision". */
4592 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4593 enum machine_mode cmp_mode, rtx op0, rtx op1)
4595 rtx tem, trueop0, trueop1;
4597 if (cmp_mode == VOIDmode)
4598 cmp_mode = GET_MODE (op0);
4599 if (cmp_mode == VOIDmode)
4600 cmp_mode = GET_MODE (op1);
4602 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4603 if (tem)
4605 if (SCALAR_FLOAT_MODE_P (mode))
4607 if (tem == const0_rtx)
4608 return CONST0_RTX (mode);
4609 #ifdef FLOAT_STORE_FLAG_VALUE
4611 REAL_VALUE_TYPE val;
4612 val = FLOAT_STORE_FLAG_VALUE (mode);
4613 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4615 #else
4616 return NULL_RTX;
4617 #endif
4619 if (VECTOR_MODE_P (mode))
4621 if (tem == const0_rtx)
4622 return CONST0_RTX (mode);
4623 #ifdef VECTOR_STORE_FLAG_VALUE
4625 int i, units;
4626 rtvec v;
4628 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4629 if (val == NULL_RTX)
4630 return NULL_RTX;
4631 if (val == const1_rtx)
4632 return CONST1_RTX (mode);
4634 units = GET_MODE_NUNITS (mode);
4635 v = rtvec_alloc (units);
4636 for (i = 0; i < units; i++)
4637 RTVEC_ELT (v, i) = val;
4638 return gen_rtx_raw_CONST_VECTOR (mode, v);
4640 #else
4641 return NULL_RTX;
4642 #endif
4645 return tem;
4648 /* For the following tests, ensure const0_rtx is op1. */
4649 if (swap_commutative_operands_p (op0, op1)
4650 || (op0 == const0_rtx && op1 != const0_rtx))
4651 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4653 /* If op0 is a compare, extract the comparison arguments from it. */
4654 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4655 return simplify_gen_relational (code, mode, VOIDmode,
4656 XEXP (op0, 0), XEXP (op0, 1));
4658 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4659 || CC0_P (op0))
4660 return NULL_RTX;
4662 trueop0 = avoid_constant_pool_reference (op0);
4663 trueop1 = avoid_constant_pool_reference (op1);
4664 return simplify_relational_operation_1 (code, mode, cmp_mode,
4665 trueop0, trueop1);
4668 /* This part of simplify_relational_operation is only used when CMP_MODE
4669 is not in class MODE_CC (i.e. it is a real comparison).
4671 MODE is the mode of the result, while CMP_MODE specifies the mode
4672 in which the comparison is done, so it is the mode of the operands. */
4674 static rtx
4675 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4676 enum machine_mode cmp_mode, rtx op0, rtx op1)
4678 enum rtx_code op0code = GET_CODE (op0);
4680 if (op1 == const0_rtx && COMPARISON_P (op0))
4682 /* If op0 is a comparison, extract the comparison arguments
4683 from it. */
4684 if (code == NE)
4686 if (GET_MODE (op0) == mode)
4687 return simplify_rtx (op0);
4688 else
4689 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4690 XEXP (op0, 0), XEXP (op0, 1));
4692 else if (code == EQ)
4694 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4695 if (new_code != UNKNOWN)
4696 return simplify_gen_relational (new_code, mode, VOIDmode,
4697 XEXP (op0, 0), XEXP (op0, 1));
4701 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4702 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
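/* E.g. (illustrative constants): (ltu (plus a (const_int 7))
   (const_int 7)), an unsigned wrap-around test for a + 7, becomes
   (geu a (const_int -7)). */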
4703 if ((code == LTU || code == GEU)
4704 && GET_CODE (op0) == PLUS
4705 && CONST_INT_P (XEXP (op0, 1))
4706 && (rtx_equal_p (op1, XEXP (op0, 0))
4707 || rtx_equal_p (op1, XEXP (op0, 1)))
4708 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4709 && XEXP (op0, 1) != const0_rtx)
4711 rtx new_cmp
4712 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4713 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4714 cmp_mode, XEXP (op0, 0), new_cmp);
4717 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4718 if ((code == LTU || code == GEU)
4719 && GET_CODE (op0) == PLUS
4720 && rtx_equal_p (op1, XEXP (op0, 1))
4721 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4722 && !rtx_equal_p (op1, XEXP (op0, 0)))
4723 return simplify_gen_relational (code, mode, cmp_mode, op0,
4724 copy_rtx (XEXP (op0, 0)));
4726 if (op1 == const0_rtx)
4728 /* Canonicalize (GTU x 0) as (NE x 0). */
4729 if (code == GTU)
4730 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4731 /* Canonicalize (LEU x 0) as (EQ x 0). */
4732 if (code == LEU)
4733 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4735 else if (op1 == const1_rtx)
4737 switch (code)
4739 case GE:
4740 /* Canonicalize (GE x 1) as (GT x 0). */
4741 return simplify_gen_relational (GT, mode, cmp_mode,
4742 op0, const0_rtx);
4743 case GEU:
4744 /* Canonicalize (GEU x 1) as (NE x 0). */
4745 return simplify_gen_relational (NE, mode, cmp_mode,
4746 op0, const0_rtx);
4747 case LT:
4748 /* Canonicalize (LT x 1) as (LE x 0). */
4749 return simplify_gen_relational (LE, mode, cmp_mode,
4750 op0, const0_rtx);
4751 case LTU:
4752 /* Canonicalize (LTU x 1) as (EQ x 0). */
4753 return simplify_gen_relational (EQ, mode, cmp_mode,
4754 op0, const0_rtx);
4755 default:
4756 break;
4759 else if (op1 == constm1_rtx)
4761 /* Canonicalize (LE x -1) as (LT x 0). */
4762 if (code == LE)
4763 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4764 /* Canonicalize (GT x -1) as (GE x 0). */
4765 if (code == GT)
4766 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4769 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
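/* E.g. (illustrative): (eq (plus x (const_int 3)) (const_int 10))
   becomes (eq x (const_int 7)). */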
4770 if ((code == EQ || code == NE)
4771 && (op0code == PLUS || op0code == MINUS)
4772 && CONSTANT_P (op1)
4773 && CONSTANT_P (XEXP (op0, 1))
4774 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4776 rtx x = XEXP (op0, 0);
4777 rtx c = XEXP (op0, 1);
4778 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4779 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4781 /* Detect an infinite recursion condition, where we oscillate at this
4782 simplification case between:
4783 A + B == C <---> C - B == A,
4784 where A, B, and C are all constants with non-simplifiable expressions,
4785 usually SYMBOL_REFs. */
4786 if (GET_CODE (tem) == invcode
4787 && CONSTANT_P (x)
4788 && rtx_equal_p (c, XEXP (tem, 1)))
4789 return NULL_RTX;
4791 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4794 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4795 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4796 if (code == NE
4797 && op1 == const0_rtx
4798 && GET_MODE_CLASS (mode) == MODE_INT
4799 && cmp_mode != VOIDmode
4800 /* ??? Work-around BImode bugs in the ia64 backend. */
4801 && mode != BImode
4802 && cmp_mode != BImode
4803 && nonzero_bits (op0, cmp_mode) == 1
4804 && STORE_FLAG_VALUE == 1)
4805 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4806 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4807 : lowpart_subreg (mode, op0, cmp_mode);
4809 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4810 if ((code == EQ || code == NE)
4811 && op1 == const0_rtx
4812 && op0code == XOR)
4813 return simplify_gen_relational (code, mode, cmp_mode,
4814 XEXP (op0, 0), XEXP (op0, 1));
4816 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4817 if ((code == EQ || code == NE)
4818 && op0code == XOR
4819 && rtx_equal_p (XEXP (op0, 0), op1)
4820 && !side_effects_p (XEXP (op0, 0)))
4821 return simplify_gen_relational (code, mode, cmp_mode,
4822 XEXP (op0, 1), const0_rtx);
4824 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4825 if ((code == EQ || code == NE)
4826 && op0code == XOR
4827 && rtx_equal_p (XEXP (op0, 1), op1)
4828 && !side_effects_p (XEXP (op0, 1)))
4829 return simplify_gen_relational (code, mode, cmp_mode,
4830 XEXP (op0, 0), const0_rtx);
4832 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4833 if ((code == EQ || code == NE)
4834 && op0code == XOR
4835 && CONST_SCALAR_INT_P (op1)
4836 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4837 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4838 simplify_gen_binary (XOR, cmp_mode,
4839 XEXP (op0, 1), op1));
4841 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4842 if ((code == EQ || code == NE)
4843 && GET_CODE (op0) == BSWAP
4844 && CONST_SCALAR_INT_P (op1))
4845 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4846 simplify_gen_unary (BSWAP, cmp_mode,
4847 op1, cmp_mode));
4849 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4850 if ((code == EQ || code == NE)
4851 && GET_CODE (op0) == BSWAP
4852 && GET_CODE (op1) == BSWAP)
4853 return simplify_gen_relational (code, mode, cmp_mode,
4854 XEXP (op0, 0), XEXP (op1, 0));
4856 if (op0code == POPCOUNT && op1 == const0_rtx)
4857 switch (code)
4859 case EQ:
4860 case LE:
4861 case LEU:
4862 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4863 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4864 XEXP (op0, 0), const0_rtx);
4866 case NE:
4867 case GT:
4868 case GTU:
4869 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4870 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4871 XEXP (op0, 0), const0_rtx);
4873 default:
4874 break;
4877 return NULL_RTX;
4880 enum
4882 CMP_EQ = 1,
4883 CMP_LT = 2,
4884 CMP_GT = 4,
4885 CMP_LTU = 8,
4886 CMP_GTU = 16
4890 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4891 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4892 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4893 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4894 For floating-point comparisons, assume that the operands were ordered. */
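/* For instance (illustrative): comparing -1 with 0 yields
   KNOWN_RESULTS = CMP_LT | CMP_GTU (signed less, unsigned greater), so
   comparison_result (LT, ...) is const_true_rtx while
   comparison_result (LTU, ...) is const0_rtx. */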
4896 static rtx
4897 comparison_result (enum rtx_code code, int known_results)
4899 switch (code)
4901 case EQ:
4902 case UNEQ:
4903 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4904 case NE:
4905 case LTGT:
4906 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4908 case LT:
4909 case UNLT:
4910 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4911 case GE:
4912 case UNGE:
4913 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4915 case GT:
4916 case UNGT:
4917 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4918 case LE:
4919 case UNLE:
4920 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4922 case LTU:
4923 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4924 case GEU:
4925 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4927 case GTU:
4928 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4929 case LEU:
4930 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4932 case ORDERED:
4933 return const_true_rtx;
4934 case UNORDERED:
4935 return const0_rtx;
4936 default:
4937 gcc_unreachable ();
4941 /* Check if the given comparison (done in the given MODE) is actually a
4942 tautology or a contradiction.
4943 If no simplification is possible, this function returns zero.
4944 Otherwise, it returns either const_true_rtx or const0_rtx. */
4947 simplify_const_relational_operation (enum rtx_code code,
4948 enum machine_mode mode,
4949 rtx op0, rtx op1)
4951 rtx tem;
4952 rtx trueop0;
4953 rtx trueop1;
4955 gcc_assert (mode != VOIDmode
4956 || (GET_MODE (op0) == VOIDmode
4957 && GET_MODE (op1) == VOIDmode));
4959 /* If op0 is a compare, extract the comparison arguments from it. */
4960 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4962 op1 = XEXP (op0, 1);
4963 op0 = XEXP (op0, 0);
4965 if (GET_MODE (op0) != VOIDmode)
4966 mode = GET_MODE (op0);
4967 else if (GET_MODE (op1) != VOIDmode)
4968 mode = GET_MODE (op1);
4969 else
4970 return 0;
4973 /* We can't simplify MODE_CC values since we don't know what the
4974 actual comparison is. */
4975 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4976 return 0;
4978 /* Make sure the constant is second. */
4979 if (swap_commutative_operands_p (op0, op1))
4981 tem = op0, op0 = op1, op1 = tem;
4982 code = swap_condition (code);
4985 trueop0 = avoid_constant_pool_reference (op0);
4986 trueop1 = avoid_constant_pool_reference (op1);
4988 /* For integer comparisons of A and B, we may be able to simplify A - B
4989 and then simplify a comparison of that with zero. If A and B are both
4990 either a register or a CONST_INT, this can't help; testing for these
4991 cases will prevent infinite recursion here and speed things up.
4993 We can only do this for EQ and NE comparisons, as otherwise we may
4994 lose or introduce overflow, which we cannot disregard as undefined
4995 since we do not know the signedness of the operation on either the
4996 left or the right hand side of the comparison. */
4998 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4999 && (code == EQ || code == NE)
5000 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5001 && (REG_P (op1) || CONST_INT_P (trueop1)))
5002 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5003 /* We cannot do this if tem is a nonzero address. */
5004 && ! nonzero_address_p (tem))
5005 return simplify_const_relational_operation (signed_condition (code),
5006 mode, tem, const0_rtx);
5008 if (! HONOR_NANS (mode) && code == ORDERED)
5009 return const_true_rtx;
5011 if (! HONOR_NANS (mode) && code == UNORDERED)
5012 return const0_rtx;
5014 /* For modes without NaNs, if the two operands are equal, we know the
5015 result except if they have side-effects. Even with NaNs we know
5016 the result of unordered comparisons and, if signaling NaNs are
5017 irrelevant, also the result of LT/GT/LTGT. */
5018 if ((! HONOR_NANS (GET_MODE (trueop0))
5019 || code == UNEQ || code == UNLE || code == UNGE
5020 || ((code == LT || code == GT || code == LTGT)
5021 && ! HONOR_SNANS (GET_MODE (trueop0))))
5022 && rtx_equal_p (trueop0, trueop1)
5023 && ! side_effects_p (trueop0))
5024 return comparison_result (code, CMP_EQ);
5026 /* If the operands are floating-point constants, see if we can fold
5027 the result. */
5028 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5029 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5030 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5032 REAL_VALUE_TYPE d0, d1;
5034 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5035 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5037 /* Comparisons are unordered iff at least one of the values is NaN. */
5038 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5039 switch (code)
5041 case UNEQ:
5042 case UNLT:
5043 case UNGT:
5044 case UNLE:
5045 case UNGE:
5046 case NE:
5047 case UNORDERED:
5048 return const_true_rtx;
5049 case EQ:
5050 case LT:
5051 case GT:
5052 case LE:
5053 case GE:
5054 case LTGT:
5055 case ORDERED:
5056 return const0_rtx;
5057 default:
5058 return 0;
5061 return comparison_result (code,
5062 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5063 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5066 /* Otherwise, see if the operands are both integers. */
5067 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5068 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5069 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5071 int width = GET_MODE_PRECISION (mode);
5072 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5073 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5075 /* Get the two words comprising each integer constant. */
5076 if (CONST_DOUBLE_AS_INT_P (trueop0))
5078 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5079 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5081 else
5083 l0u = l0s = INTVAL (trueop0);
5084 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5087 if (CONST_DOUBLE_AS_INT_P (trueop1))
5089 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5090 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5092 else
5094 l1u = l1s = INTVAL (trueop1);
5095 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5098 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5099 we have to sign or zero-extend the values. */
5100 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5102 l0u &= GET_MODE_MASK (mode);
5103 l1u &= GET_MODE_MASK (mode);
5105 if (val_signbit_known_set_p (mode, l0s))
5106 l0s |= ~GET_MODE_MASK (mode);
5108 if (val_signbit_known_set_p (mode, l1s))
5109 l1s |= ~GET_MODE_MASK (mode);
5111 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5112 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5114 if (h0u == h1u && l0u == l1u)
5115 return comparison_result (code, CMP_EQ);
5116 else
5118 int cr;
5119 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5120 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5121 return comparison_result (code, cr);
5125 /* Optimize comparisons with upper and lower bounds. */
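/* E.g. (illustrative): if nonzero_bits shows that only the low four
   bits of X can be set, then MMIN = 0 and MMAX = 15, so
   (gtu X (const_int 15)) folds to const0_rtx and (leu X (const_int 15))
   to const_true_rtx. */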
5126 if (HWI_COMPUTABLE_MODE_P (mode)
5127 && CONST_INT_P (trueop1))
5129 int sign;
5130 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5131 HOST_WIDE_INT val = INTVAL (trueop1);
5132 HOST_WIDE_INT mmin, mmax;
5134 if (code == GEU
5135 || code == LEU
5136 || code == GTU
5137 || code == LTU)
5138 sign = 0;
5139 else
5140 sign = 1;
5142 /* Get a reduced range if the sign bit is zero. */
5143 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5145 mmin = 0;
5146 mmax = nonzero;
5148 else
5150 rtx mmin_rtx, mmax_rtx;
5151 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5153 mmin = INTVAL (mmin_rtx);
5154 mmax = INTVAL (mmax_rtx);
5155 if (sign)
5157 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5159 mmin >>= (sign_copies - 1);
5160 mmax >>= (sign_copies - 1);
5164 switch (code)
5166 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5167 case GEU:
5168 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5169 return const_true_rtx;
5170 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5171 return const0_rtx;
5172 break;
5173 case GE:
5174 if (val <= mmin)
5175 return const_true_rtx;
5176 if (val > mmax)
5177 return const0_rtx;
5178 break;
5180 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5181 case LEU:
5182 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5183 return const_true_rtx;
5184 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5185 return const0_rtx;
5186 break;
5187 case LE:
5188 if (val >= mmax)
5189 return const_true_rtx;
5190 if (val < mmin)
5191 return const0_rtx;
5192 break;
5194 case EQ:
5195 /* x == y is always false for y out of range. */
5196 if (val < mmin || val > mmax)
5197 return const0_rtx;
5198 break;
5200 /* x > y is always false for y >= mmax, always true for y < mmin. */
5201 case GTU:
5202 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5203 return const0_rtx;
5204 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5205 return const_true_rtx;
5206 break;
5207 case GT:
5208 if (val >= mmax)
5209 return const0_rtx;
5210 if (val < mmin)
5211 return const_true_rtx;
5212 break;
5214 /* x < y is always false for y <= mmin, always true for y > mmax. */
5215 case LTU:
5216 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5217 return const0_rtx;
5218 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5219 return const_true_rtx;
5220 break;
5221 case LT:
5222 if (val <= mmin)
5223 return const0_rtx;
5224 if (val > mmax)
5225 return const_true_rtx;
5226 break;
5228 case NE:
5229 /* x != y is always true for y out of range. */
5230 if (val < mmin || val > mmax)
5231 return const_true_rtx;
5232 break;
5234 default:
5235 break;
5239 /* Optimize integer comparisons with zero. */
5240 if (trueop1 == const0_rtx)
5242 /* Some addresses are known to be nonzero. We don't know
5243 their sign, but equality comparisons are known. */
5244 if (nonzero_address_p (trueop0))
5246 if (code == EQ || code == LEU)
5247 return const0_rtx;
5248 if (code == NE || code == GTU)
5249 return const_true_rtx;
5252 /* See if the first operand is an IOR with a constant. If so, we
5253 may be able to determine the result of this comparison. */
5254 if (GET_CODE (op0) == IOR)
5256 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5257 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5259 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5260 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5261 && (UINTVAL (inner_const)
5262 & ((unsigned HOST_WIDE_INT) 1
5263 << sign_bitnum)));
5265 switch (code)
5267 case EQ:
5268 case LEU:
5269 return const0_rtx;
5270 case NE:
5271 case GTU:
5272 return const_true_rtx;
5273 case LT:
5274 case LE:
5275 if (has_sign)
5276 return const_true_rtx;
5277 break;
5278 case GT:
5279 case GE:
5280 if (has_sign)
5281 return const0_rtx;
5282 break;
5283 default:
5284 break;
5290 /* Optimize comparison of ABS with zero. */
5291 if (trueop1 == CONST0_RTX (mode)
5292 && (GET_CODE (trueop0) == ABS
5293 || (GET_CODE (trueop0) == FLOAT_EXTEND
5294 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5296 switch (code)
5298 case LT:
5299 /* Optimize abs(x) < 0.0. */
5300 if (!HONOR_SNANS (mode)
5301 && (!INTEGRAL_MODE_P (mode)
5302 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5304 if (INTEGRAL_MODE_P (mode)
5305 && (issue_strict_overflow_warning
5306 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5307 warning (OPT_Wstrict_overflow,
5308 ("assuming signed overflow does not occur when "
5309 "assuming abs (x) < 0 is false"));
5310 return const0_rtx;
5312 break;
5314 case GE:
5315 /* Optimize abs(x) >= 0.0. */
5316 if (!HONOR_NANS (mode)
5317 && (!INTEGRAL_MODE_P (mode)
5318 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5320 if (INTEGRAL_MODE_P (mode)
5321 && (issue_strict_overflow_warning
5322 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5323 warning (OPT_Wstrict_overflow,
5324 ("assuming signed overflow does not occur when "
5325 "assuming abs (x) >= 0 is true"));
5326 return const_true_rtx;
5328 break;
5330 case UNGE:
5331 /* Optimize ! (abs(x) < 0.0). */
5332 return const_true_rtx;
5334 default:
5335 break;
5339 return 0;
5342 /* Simplify CODE, an operation with result mode MODE and three operands,
5343 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5344 a constant. Return 0 if no simplification is possible. */
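/* For example (illustrative): (if_then_else (const_int 1) a b) folds
   to A, and the FMA case below rewrites (fma (neg x) (neg y) z) as
   (fma x y z). */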
5347 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5348 enum machine_mode op0_mode, rtx op0, rtx op1,
5349 rtx op2)
5351 unsigned int width = GET_MODE_PRECISION (mode);
5352 bool any_change = false;
5353 rtx tem, trueop2;
5355 /* VOIDmode means "infinite" precision. */
5356 if (width == 0)
5357 width = HOST_BITS_PER_WIDE_INT;
5359 switch (code)
5361 case FMA:
5362 /* Simplify negations around the multiplication. */
5363 /* -a * -b + c => a * b + c. */
5364 if (GET_CODE (op0) == NEG)
5366 tem = simplify_unary_operation (NEG, mode, op1, mode);
5367 if (tem)
5368 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5370 else if (GET_CODE (op1) == NEG)
5372 tem = simplify_unary_operation (NEG, mode, op0, mode);
5373 if (tem)
5374 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5377 /* Canonicalize the two multiplication operands. */
5378 /* a * -b + c => -b * a + c. */
5379 if (swap_commutative_operands_p (op0, op1))
5380 tem = op0, op0 = op1, op1 = tem, any_change = true;
5382 if (any_change)
5383 return gen_rtx_FMA (mode, op0, op1, op2);
5384 return NULL_RTX;
5386 case SIGN_EXTRACT:
5387 case ZERO_EXTRACT:
5388 if (CONST_INT_P (op0)
5389 && CONST_INT_P (op1)
5390 && CONST_INT_P (op2)
5391 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5392 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5394 /* Extracting a bit-field from a constant */
5395 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5396 HOST_WIDE_INT op1val = INTVAL (op1);
5397 HOST_WIDE_INT op2val = INTVAL (op2);
5398 if (BITS_BIG_ENDIAN)
5399 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5400 else
5401 val >>= op2val;
5403 if (HOST_BITS_PER_WIDE_INT != op1val)
5405 /* First zero-extend. */
5406 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5407 /* If desired, propagate sign bit. */
5408 if (code == SIGN_EXTRACT
5409 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5410 != 0)
5411 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5414 return gen_int_mode (val, mode);
5416 break;
5418 case IF_THEN_ELSE:
5419 if (CONST_INT_P (op0))
5420 return op0 != const0_rtx ? op1 : op2;
5422 /* Convert c ? a : a into "a". */
5423 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5424 return op1;
5426 /* Convert a != b ? a : b into "a". */
5427 if (GET_CODE (op0) == NE
5428 && ! side_effects_p (op0)
5429 && ! HONOR_NANS (mode)
5430 && ! HONOR_SIGNED_ZEROS (mode)
5431 && ((rtx_equal_p (XEXP (op0, 0), op1)
5432 && rtx_equal_p (XEXP (op0, 1), op2))
5433 || (rtx_equal_p (XEXP (op0, 0), op2)
5434 && rtx_equal_p (XEXP (op0, 1), op1))))
5435 return op1;
5437 /* Convert a == b ? a : b into "b". */
5438 if (GET_CODE (op0) == EQ
5439 && ! side_effects_p (op0)
5440 && ! HONOR_NANS (mode)
5441 && ! HONOR_SIGNED_ZEROS (mode)
5442 && ((rtx_equal_p (XEXP (op0, 0), op1)
5443 && rtx_equal_p (XEXP (op0, 1), op2))
5444 || (rtx_equal_p (XEXP (op0, 0), op2)
5445 && rtx_equal_p (XEXP (op0, 1), op1))))
5446 return op2;
5448 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5450 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5451 ? GET_MODE (XEXP (op0, 1))
5452 : GET_MODE (XEXP (op0, 0)));
5453 rtx temp;
5455 /* Look for happy constants in op1 and op2. */
5456 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5458 HOST_WIDE_INT t = INTVAL (op1);
5459 HOST_WIDE_INT f = INTVAL (op2);
5461 if (t == STORE_FLAG_VALUE && f == 0)
5462 code = GET_CODE (op0);
5463 else if (t == 0 && f == STORE_FLAG_VALUE)
5465 enum rtx_code tmp;
5466 tmp = reversed_comparison_code (op0, NULL_RTX);
5467 if (tmp == UNKNOWN)
5468 break;
5469 code = tmp;
5471 else
5472 break;
5474 return simplify_gen_relational (code, mode, cmp_mode,
5475 XEXP (op0, 0), XEXP (op0, 1));
5478 if (cmp_mode == VOIDmode)
5479 cmp_mode = op0_mode;
5480 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5481 cmp_mode, XEXP (op0, 0),
5482 XEXP (op0, 1));
5484 /* See if any simplifications were possible. */
5485 if (temp)
5487 if (CONST_INT_P (temp))
5488 return temp == const0_rtx ? op2 : op1;
5489 else if (temp)
5490 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5493 break;
5495 case VEC_MERGE:
5496 gcc_assert (GET_MODE (op0) == mode);
5497 gcc_assert (GET_MODE (op1) == mode);
5498 gcc_assert (VECTOR_MODE_P (mode));
5499 trueop2 = avoid_constant_pool_reference (op2);
5500 if (CONST_INT_P (trueop2))
5502 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5503 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5504 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5505 unsigned HOST_WIDE_INT mask;
5506 if (n_elts == HOST_BITS_PER_WIDE_INT)
5507 mask = -1;
5508 else
5509 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5511 if (!(sel & mask) && !side_effects_p (op0))
5512 return op1;
5513 if ((sel & mask) == mask && !side_effects_p (op1))
5514 return op0;
5516 rtx trueop0 = avoid_constant_pool_reference (op0);
5517 rtx trueop1 = avoid_constant_pool_reference (op1);
5518 if (GET_CODE (trueop0) == CONST_VECTOR
5519 && GET_CODE (trueop1) == CONST_VECTOR)
5521 rtvec v = rtvec_alloc (n_elts);
5522 unsigned int i;
5524 for (i = 0; i < n_elts; i++)
5525 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5526 ? CONST_VECTOR_ELT (trueop0, i)
5527 : CONST_VECTOR_ELT (trueop1, i));
5528 return gen_rtx_CONST_VECTOR (mode, v);
5531 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5532 if no element from a appears in the result. */
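/* E.g. (illustrative four-element masks): with M = 0b1100 and
   N = 0b0011 the outer merge takes elements 0 and 1 from the inner
   merge, which draws those elements from B, so A is dead and the
   nested merge collapses to (vec_merge b c n). */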
5533 if (GET_CODE (op0) == VEC_MERGE)
5535 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5536 if (CONST_INT_P (tem))
5538 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5539 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5540 return simplify_gen_ternary (code, mode, mode,
5541 XEXP (op0, 1), op1, op2);
5542 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5543 return simplify_gen_ternary (code, mode, mode,
5544 XEXP (op0, 0), op1, op2);
5547 if (GET_CODE (op1) == VEC_MERGE)
5549 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5550 if (CONST_INT_P (tem))
5552 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5553 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5554 return simplify_gen_ternary (code, mode, mode,
5555 op0, XEXP (op1, 1), op2);
5556 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5557 return simplify_gen_ternary (code, mode, mode,
5558 op0, XEXP (op1, 0), op2);
5563 if (rtx_equal_p (op0, op1)
5564 && !side_effects_p (op2) && !side_effects_p (op1))
5565 return op0;
5567 break;
5569 default:
5570 gcc_unreachable ();
5573 return 0;
5576 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5577 or CONST_VECTOR,
5578 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5580 Works by unpacking OP into a collection of 8-bit values
5581 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5582 and then repacking them again for OUTERMODE. */
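/* For example (illustrative, little-endian): SImode 0x12345678
   unpacks to the byte array {0x78, 0x56, 0x34, 0x12}; an HImode
   subreg at BYTE 2 repacks bytes {0x34, 0x12} into
   (const_int 0x1234). */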
5584 static rtx
5585 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5586 enum machine_mode innermode, unsigned int byte)
5588 /* We support up to 512-bit values (for V8DFmode). */
5589 enum {
5590 max_bitsize = 512,
5591 value_bit = 8,
5592 value_mask = (1 << value_bit) - 1
5594 unsigned char value[max_bitsize / value_bit];
5595 int value_start;
5596 int i;
5597 int elem;
5599 int num_elem;
5600 rtx * elems;
5601 int elem_bitsize;
5602 rtx result_s;
5603 rtvec result_v = NULL;
5604 enum mode_class outer_class;
5605 enum machine_mode outer_submode;
5607 /* Some ports misuse CCmode. */
5608 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5609 return op;
5611 /* We have no way to represent a complex constant at the rtl level. */
5612 if (COMPLEX_MODE_P (outermode))
5613 return NULL_RTX;
5615 /* Unpack the value. */
5617 if (GET_CODE (op) == CONST_VECTOR)
5619 num_elem = CONST_VECTOR_NUNITS (op);
5620 elems = &CONST_VECTOR_ELT (op, 0);
5621 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5623 else
5625 num_elem = 1;
5626 elems = &op;
5627 elem_bitsize = max_bitsize;
5629 /* If this asserts, it is too complicated; reducing value_bit may help. */
5630 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5631 /* I don't know how to handle endianness of sub-units. */
5632 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5634 for (elem = 0; elem < num_elem; elem++)
5636 unsigned char * vp;
5637 rtx el = elems[elem];
5639 /* Vectors are kept in target memory order. (This is probably
5640 a mistake.) */
5642 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5643 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5644 / BITS_PER_UNIT);
5645 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5646 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5647 unsigned bytele = (subword_byte % UNITS_PER_WORD
5648 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5649 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5652 switch (GET_CODE (el))
5654 case CONST_INT:
5655 for (i = 0;
5656 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5657 i += value_bit)
5658 *vp++ = INTVAL (el) >> i;
5659 /* CONST_INTs are always logically sign-extended. */
5660 for (; i < elem_bitsize; i += value_bit)
5661 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5662 break;
5664 case CONST_DOUBLE:
5665 if (GET_MODE (el) == VOIDmode)
5667 unsigned char extend = 0;
5668 /* If this triggers, someone should have generated a
5669 CONST_INT instead. */
5670 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5672 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5673 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5674 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5676 *vp++
5677 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5678 i += value_bit;
5681 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5682 extend = -1;
5683 for (; i < elem_bitsize; i += value_bit)
5684 *vp++ = extend;
5686 else
5688 long tmp[max_bitsize / 32];
5689 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5691 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5692 gcc_assert (bitsize <= elem_bitsize);
5693 gcc_assert (bitsize % value_bit == 0);
5695 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5696 GET_MODE (el));
5698 /* real_to_target produces its result in words affected by
5699 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5700 and use WORDS_BIG_ENDIAN instead; see the documentation
5701 of SUBREG in rtl.texi. */
5702 for (i = 0; i < bitsize; i += value_bit)
5704 int ibase;
5705 if (WORDS_BIG_ENDIAN)
5706 ibase = bitsize - 1 - i;
5707 else
5708 ibase = i;
5709 *vp++ = tmp[ibase / 32] >> i % 32;
5712 /* It shouldn't matter what's done here, so fill it with
5713 zero. */
5714 for (; i < elem_bitsize; i += value_bit)
5715 *vp++ = 0;
5717 break;
5719 case CONST_FIXED:
5720 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5722 for (i = 0; i < elem_bitsize; i += value_bit)
5723 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5725 else
5727 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5728 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5729 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5730 i += value_bit)
5731 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5732 >> (i - HOST_BITS_PER_WIDE_INT);
5733 for (; i < elem_bitsize; i += value_bit)
5734 *vp++ = 0;
5736 break;
5738 default:
5739 gcc_unreachable ();
5743 /* Now, pick the right byte to start with. */
5744 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5745 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5746 will already have offset 0. */
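/* E.g. (illustrative): a 2-byte value at BYTE 0 of a 4-byte
   big-endian inner value occupies least-significant-numbered bytes
   2 and 3, so BYTE is renumbered from 0 to 2. */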
5747 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5749 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5750 - byte);
5751 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5752 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5753 byte = (subword_byte % UNITS_PER_WORD
5754 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5757 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5758 so if it's become negative it will instead be very large.) */
5759 gcc_assert (byte < GET_MODE_SIZE (innermode));
5761 /* Convert from bytes to chunks of size value_bit. */
5762 value_start = byte * (BITS_PER_UNIT / value_bit);
5764 /* Re-pack the value. */
5766 if (VECTOR_MODE_P (outermode))
5768 num_elem = GET_MODE_NUNITS (outermode);
5769 result_v = rtvec_alloc (num_elem);
5770 elems = &RTVEC_ELT (result_v, 0);
5771 outer_submode = GET_MODE_INNER (outermode);
5773 else
5775 num_elem = 1;
5776 elems = &result_s;
5777 outer_submode = outermode;
5780 outer_class = GET_MODE_CLASS (outer_submode);
5781 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5783 gcc_assert (elem_bitsize % value_bit == 0);
5784 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5786 for (elem = 0; elem < num_elem; elem++)
5788 unsigned char *vp;
5790 /* Vectors are stored in target memory order. (This is probably
5791 a mistake.) */
5793 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5794 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5795 / BITS_PER_UNIT);
5796 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5797 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5798 unsigned bytele = (subword_byte % UNITS_PER_WORD
5799 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5800 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5803 switch (outer_class)
5805 case MODE_INT:
5806 case MODE_PARTIAL_INT:
5808 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5810 for (i = 0;
5811 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5812 i += value_bit)
5813 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5814 for (; i < elem_bitsize; i += value_bit)
5815 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5816 << (i - HOST_BITS_PER_WIDE_INT);
5818 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5819 know why. */
5820 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5821 elems[elem] = gen_int_mode (lo, outer_submode);
5822 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5823 elems[elem] = immed_double_const (lo, hi, outer_submode);
5824 else
5825 return NULL_RTX;
5827 break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
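        /* E.g. (illustrative): an SFmode element fills tmp[0] only, while a
           DFmode element on a !WORDS_BIG_ENDIAN target puts bits 0..31 in
           tmp[0] and bits 32..63 in tmp[1]; WORDS_BIG_ENDIAN mirrors the
           word order, as the comment above describes.  */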
        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);
  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
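  /* For example (assuming a little-endian target):
     (subreg:QI (const_int 0x1234) 0) with HImode INNERMODE folds through
     simplify_immed_subreg to (const_int 0x34); BYTE 1 would yield
     (const_int 0x12).  */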
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);
      /* SUBREG_BYTE represents the offset as if the value were stored in
         memory.  The irritating exception is the paradoxical subreg, for
         which we define SUBREG_BYTE to be 0; on big-endian machines the
         value would otherwise be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see whether we are still looking at
             the lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
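  /* An illustrative fold of the case above (hypothetical pseudo R):
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) recurses with the combined
     offset and simplifies to (subreg:QI (reg:SI R) 0), collapsing the
     nested subregs into one.  */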
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
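  /* For instance, (subreg:SI (reg:DI 0) 0) becomes (reg:SI 0) whenever
     simplify_subreg_regno reports that hard register 0 can hold SImode;
     if it cannot, or the register is the stack, frame, or argument
     pointer, the SUBREG is left alone.  */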
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
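  /* E.g. for a complex double (concat:DC X Y), a DFmode subreg at byte 0
     selects X (the real part) and byte 8 selects Y (the imaginary part);
     a subreg that would straddle the two halves is not simplified.  */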
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
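  /* For example, with OP = (zero_extend:DI (reg:SI R)) (R hypothetical),
     the SImode subreg selecting the high word reads only bits 32..63,
     all of which are known to be zero, so it folds to (const_int 0).  */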
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
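/* Callers use simplify_gen_subreg when they want some rtx in OUTERMODE
   and will accept either a folded result or a fresh SUBREG.  E.g.
   (illustrative) simplify_gen_subreg (SImode, x, DImode, 0) returns
   (const_int 5) for x = (const_int 5), and (subreg:SI (reg:DI N) 0)
   for a pseudo register N.  */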
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
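/* Illustrative use (hypothetical pseudo R): simplify_rtx applied to
   (plus:SI (reg:SI R) (const_int 0)) dispatches to
   simplify_binary_operation and returns (reg:SI R); an rtx for which
   no rule applies comes back as NULL.  */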