[official-gcc.git] / gcc / simplify-rtx.c
blob 5c1dd25d8a4380596da8df3b8e550d0a47aa5cb9
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
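/* Illustrative note (not in the original source), assuming a 64-bit
   HOST_WIDE_INT: HWI_SIGN_EXTEND produces the high half that makes the
   (low, high) pair read as one signed value.  E.g. HWI_SIGN_EXTEND (5)
   is 0, so small nonnegative constants become the pair (5, 0), while
   HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -1) is -1, giving the
   all-ones pair for -1.  */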
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
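/* Illustrative example (not in the original source): the truncation
   matters for the most negative value of a mode.  In QImode,
   negating (const_int -1) gives (const_int 1) as expected, but negating
   (const_int -128) wraps: -(-128) = 128, and gen_int_mode truncates
   128 back to (const_int -128).  */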
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
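/* Illustrative example (not in the original source): for 32-bit SImode,
   mode_signbit_p accepts exactly the constant whose mode-masked value
   is 1 << 31, i.e. the CONST_INT produced by
   gen_int_mode (0x80000000, SImode); every other constant yields
   false.  */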
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
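/* For instance (illustrative, not in the original source):
   val_signbit_p (QImode, 0x80) and val_signbit_p (QImode, 0x180) are
   both true, because VAL is first masked with GET_MODE_MASK (0xff)
   before the comparison against 1 << 7.  */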
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
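/* Usage sketch (illustrative, not in the original source): a caller
   that wants (plus:SI x y) would write

     rtx sum = simplify_gen_binary (PLUS, SImode, x, y);

   If Y is (const_int 0) the call folds to X outright; if X is a
   constant and Y is not, the swap above places the constant second
   before the PLUS rtx is generated.  */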
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
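/* Usage sketch (illustrative, not in the original source; the
   symbol name is hypothetical): if X is (mem/u:SF (symbol_ref LC0))
   and the pool entry at LC0 holds 1.5 in SFmode, the call returns
   the (const_double:SF 1.5) itself, letting callers fold through
   pool-allocated constants.  */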
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 X; if it returns non-NULL, replace X with its return value and simplify the
410 result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
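/* Worked example of the three cases above (illustrative, not in the
   original source), for OP = (zero_extend:DI (x:HI)):
     truncating to HImode returns X itself (MODE == origmode);
     truncating to QImode gives (truncate:QI (x:HI)) (MODE narrower);
     truncating to SImode gives (zero_extend:SI (x:HI)) (MODE wider
     than origmode but still narrower than OP_MODE).  */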
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
658 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
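/* E.g. (illustrative, not in the original source):
   (truncate:QI (ashiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)))
   becomes (ashiftrt:QI (x:QI) (const_int 2)): the shift count 2 is
   below QImode's 8-bit precision, and SImode is at least twice as
   wide as QImode.  */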
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
675 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
687 into (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
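/* Worked example (illustrative, not in the original source):
   truncating (lshiftrt:DI (x:DI) (const_int 32)) to SImode extracts
   the high word of X.  With 32-bit words, shifted_bytes is 4, so the
   result is (subreg:SI (x:DI) 4) on a little-endian target and
   (subreg:SI (x:DI) 0) on a big-endian one.  */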
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
861 if (STORE_FLAG_VALUE == -1
862 && GET_CODE (op) == ASHIFTRT
863 && CONST_INT_P (XEXP (op, 1))
864 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
865 return simplify_gen_relational (GE, mode, VOIDmode,
866 XEXP (op, 0), const0_rtx);
869 if (GET_CODE (op) == SUBREG
870 && subreg_lowpart_p (op)
871 && (GET_MODE_SIZE (GET_MODE (op))
872 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
873 && GET_CODE (SUBREG_REG (op)) == ASHIFT
874 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
876 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
877 rtx x;
879 x = gen_rtx_ROTATE (inner_mode,
880 simplify_gen_unary (NOT, inner_mode, const1_rtx,
881 inner_mode),
882 XEXP (SUBREG_REG (op), 1));
883 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
884 if (temp)
885 return temp;
888 /* Apply De Morgan's laws to reduce number of patterns for machines
889 with negating logical insns (and-not, nand, etc.). If result has
890 only one NOT, put it first, since that is how the patterns are
891 coded. */
892 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
894 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
895 enum machine_mode op_mode;
897 op_mode = GET_MODE (in1);
898 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
900 op_mode = GET_MODE (in2);
901 if (op_mode == VOIDmode)
902 op_mode = mode;
903 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
905 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
907 rtx tem = in2;
908 in2 = in1; in1 = tem;
911 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
912 mode, in1, in2);
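/* E.g. (illustrative, not in the original source):
   (not (and X Y)) -> (ior (not X) (not Y)) and
   (not (ior X Y)) -> (and (not X) (not Y)); if only one operand ends
   up still wrapped in a NOT, the swap above puts that NOT first.  */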
915 /* (not (bswap x)) -> (bswap (not x)). */
916 if (GET_CODE (op) == BSWAP)
918 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
919 return simplify_gen_unary (BSWAP, mode, x, mode);
921 break;
923 case NEG:
924 /* (neg (neg X)) == X. */
925 if (GET_CODE (op) == NEG)
926 return XEXP (op, 0);
928 /* (neg (plus X 1)) can become (not X). */
929 if (GET_CODE (op) == PLUS
930 && XEXP (op, 1) == const1_rtx)
931 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
933 /* Similarly, (neg (not X)) is (plus X 1). */
934 if (GET_CODE (op) == NOT)
935 return plus_constant (mode, XEXP (op, 0), 1);
937 /* (neg (minus X Y)) can become (minus Y X). This transformation
938 isn't safe for modes with signed zeros, since if X and Y are
939 both +0, (minus Y X) is the same as (minus X Y). If the
940 rounding mode is towards +infinity (or -infinity) then the two
941 expressions will be rounded differently. */
942 if (GET_CODE (op) == MINUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
947 if (GET_CODE (op) == PLUS
948 && !HONOR_SIGNED_ZEROS (mode)
949 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
951 /* (neg (plus A C)) is simplified to (minus -C A). */
952 if (CONST_SCALAR_INT_P (XEXP (op, 1))
953 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
955 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
956 if (temp)
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
960 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
961 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
962 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
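/* E.g. (illustrative, not in the original source):
   (neg:SI (plus:SI x (const_int 5))) folds through the constant
   branch above to (minus:SI (const_int -5) x), while the fully
   variable (neg:SI (plus:SI x y)) canonicalizes to
   (minus:SI (neg:SI x) y).  */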
965 /* (neg (mult A B)) becomes (mult A (neg B)).
966 This works even for floating-point values. */
967 if (GET_CODE (op) == MULT
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
971 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
974 /* NEG commutes with ASHIFT since it is multiplication. Only do
975 this if we can then eliminate the NEG (e.g., if the operand
976 is a constant). */
977 if (GET_CODE (op) == ASHIFT)
979 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
980 if (temp)
981 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
984 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
985 C is equal to the width of MODE minus 1. */
986 if (GET_CODE (op) == ASHIFTRT
987 && CONST_INT_P (XEXP (op, 1))
988 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
989 return simplify_gen_binary (LSHIFTRT, mode,
990 XEXP (op, 0), XEXP (op, 1));
992 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
993 C is equal to the width of MODE minus 1. */
994 if (GET_CODE (op) == LSHIFTRT
995 && CONST_INT_P (XEXP (op, 1))
996 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 return simplify_gen_binary (ASHIFTRT, mode,
998 XEXP (op, 0), XEXP (op, 1));
1000 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1001 if (GET_CODE (op) == XOR
1002 && XEXP (op, 1) == const1_rtx
1003 && nonzero_bits (XEXP (op, 0), mode) == 1)
1004 return plus_constant (mode, XEXP (op, 0), -1);
1006 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1007 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1008 if (GET_CODE (op) == LT
1009 && XEXP (op, 1) == const0_rtx
1010 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1012 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1013 int isize = GET_MODE_PRECISION (inner);
1014 if (STORE_FLAG_VALUE == 1)
1016 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1017 GEN_INT (isize - 1));
1018 if (mode == inner)
1019 return temp;
1020 if (GET_MODE_PRECISION (mode) > isize)
1021 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1022 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1024 else if (STORE_FLAG_VALUE == -1)
1026 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1027 GEN_INT (isize - 1));
1028 if (mode == inner)
1029 return temp;
1030 if (GET_MODE_PRECISION (mode) > isize)
1031 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1032 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1035 break;
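/* E.g. for the (neg (lt ...)) rules above (illustrative, not in the
   original source): with STORE_FLAG_VALUE == 1,
   (neg:SI (lt:SI (x:SI) (const_int 0))) becomes
   (ashiftrt:SI x (const_int 31)), i.e. -1 or 0 according to the sign
   bit of X.  */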
1037 case TRUNCATE:
1038 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1039 with the umulXi3_highpart patterns. */
1040 if (GET_CODE (op) == LSHIFTRT
1041 && GET_CODE (XEXP (op, 0)) == MULT)
1042 break;
1044 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1046 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1048 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1049 if (temp)
1050 return temp;
1052 /* We can't handle truncation to a partial integer mode here
1053 because we don't know the real bitsize of the partial
1054 integer mode. */
1055 break;
1058 if (GET_MODE (op) != VOIDmode)
1060 temp = simplify_truncation (mode, op, GET_MODE (op));
1061 if (temp)
1062 return temp;
1065 /* If we know that the value is already truncated, we can
1066 replace the TRUNCATE with a SUBREG. */
1067 if (GET_MODE_NUNITS (mode) == 1
1068 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1069 || truncated_to_mode (mode, op)))
1071 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 if (temp)
1073 return temp;
1076 /* A truncate of a comparison can be replaced with a subreg if
1077 STORE_FLAG_VALUE permits. This is like the previous test,
1078 but it works even if the comparison is done in a mode larger
1079 than HOST_BITS_PER_WIDE_INT. */
1080 if (HWI_COMPUTABLE_MODE_P (mode)
1081 && COMPARISON_P (op)
1082 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1084 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1085 if (temp)
1086 return temp;
1089 /* A truncate of a memory is just loading the low part of the memory
1090 if we are not changing the meaning of the address. */
1091 if (GET_CODE (op) == MEM
1092 && !VECTOR_MODE_P (mode)
1093 && !MEM_VOLATILE_P (op)
1094 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 break;
1103 case FLOAT_TRUNCATE:
1104 if (DECIMAL_FLOAT_MODE_P (mode))
1105 break;
1107 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1108 if (GET_CODE (op) == FLOAT_EXTEND
1109 && GET_MODE (XEXP (op, 0)) == mode)
1110 return XEXP (op, 0);
1112 /* (float_truncate:SF (float_truncate:DF foo:XF))
1113 = (float_truncate:SF foo:XF).
1114 This may eliminate double rounding, so it is unsafe.
1116 (float_truncate:SF (float_extend:XF foo:DF))
1117 = (float_truncate:SF foo:DF).
1119 (float_truncate:DF (float_extend:XF foo:SF))
1120 = (float_extend:DF foo:SF). */
1121 if ((GET_CODE (op) == FLOAT_TRUNCATE
1122 && flag_unsafe_math_optimizations)
1123 || GET_CODE (op) == FLOAT_EXTEND)
1124 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1125 0)))
1126 > GET_MODE_SIZE (mode)
1127 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1128 mode,
1129 XEXP (op, 0), mode);
1131 /* (float_truncate (float x)) is (float x) */
1132 if (GET_CODE (op) == FLOAT
1133 && (flag_unsafe_math_optimizations
1134 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1135 && ((unsigned)significand_size (GET_MODE (op))
1136 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1137 - num_sign_bit_copies (XEXP (op, 0),
1138 GET_MODE (XEXP (op, 0))))))))
1139 return simplify_gen_unary (FLOAT, mode,
1140 XEXP (op, 0),
1141 GET_MODE (XEXP (op, 0)));
1143 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1144 (OP:SF foo:SF) if OP is NEG or ABS. */
1145 if ((GET_CODE (op) == ABS
1146 || GET_CODE (op) == NEG)
1147 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1149 return simplify_gen_unary (GET_CODE (op), mode,
1150 XEXP (XEXP (op, 0), 0), mode);
1152 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 is (float_truncate:SF x). */
1154 if (GET_CODE (op) == SUBREG
1155 && subreg_lowpart_p (op)
1156 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1157 return SUBREG_REG (op);
1158 break;
1160 case FLOAT_EXTEND:
1161 if (DECIMAL_FLOAT_MODE_P (mode))
1162 break;
1164 /* (float_extend (float_extend x)) is (float_extend x)
1166 (float_extend (float x)) is (float x) assuming that double
1167 rounding can't happen. */
1169 if (GET_CODE (op) == FLOAT_EXTEND
1170 || (GET_CODE (op) == FLOAT
1171 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1172 && ((unsigned)significand_size (GET_MODE (op))
1173 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1174 - num_sign_bit_copies (XEXP (op, 0),
1175 GET_MODE (XEXP (op, 0)))))))
1176 return simplify_gen_unary (GET_CODE (op), mode,
1177 XEXP (op, 0),
1178 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case ABS:
1183 /* (abs (neg <foo>)) -> (abs <foo>) */
1184 if (GET_CODE (op) == NEG)
1185 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1186 GET_MODE (XEXP (op, 0)));
1188 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1189 do nothing. */
1190 if (GET_MODE (op) == VOIDmode)
1191 break;
1193 /* If operand is something known to be positive, ignore the ABS. */
1194 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1195 || val_signbit_known_clear_p (GET_MODE (op),
1196 nonzero_bits (op, GET_MODE (op))))
1197 return op;
1199 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1200 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1201 return gen_rtx_NEG (mode, op);
1203 break;
1205 case FFS:
1206 /* (ffs (*_extend <X>)) = (ffs <X>) */
1207 if (GET_CODE (op) == SIGN_EXTEND
1208 || GET_CODE (op) == ZERO_EXTEND)
1209 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1210 GET_MODE (XEXP (op, 0)));
1211 break;
1213 case POPCOUNT:
1214 switch (GET_CODE (op))
1216 case BSWAP:
1217 case ZERO_EXTEND:
1218 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1219 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1222 case ROTATE:
1223 case ROTATERT:
1224 /* Rotations don't affect popcount. */
1225 if (!side_effects_p (XEXP (op, 1)))
1226 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1227 GET_MODE (XEXP (op, 0)));
1228 break;
1230 default:
1231 break;
1233 break;
1235 case PARITY:
1236 switch (GET_CODE (op))
1238 case NOT:
1239 case BSWAP:
1240 case ZERO_EXTEND:
1241 case SIGN_EXTEND:
1242 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1245 case ROTATE:
1246 case ROTATERT:
1247 /* Rotations don't affect parity. */
1248 if (!side_effects_p (XEXP (op, 1)))
1249 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1250 GET_MODE (XEXP (op, 0)));
1251 break;
1253 default:
1254 break;
1256 break;
1258 case BSWAP:
1259 /* (bswap (bswap x)) -> x. */
1260 if (GET_CODE (op) == BSWAP)
1261 return XEXP (op, 0);
1262 break;
1264 case FLOAT:
1265 /* (float (sign_extend <X>)) = (float <X>). */
1266 if (GET_CODE (op) == SIGN_EXTEND)
1267 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1269 break;
1271 case SIGN_EXTEND:
1272 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1273 becomes just the MINUS if its mode is MODE. This allows
1274 folding switch statements on machines using casesi (such as
1275 the VAX). */
1276 if (GET_CODE (op) == TRUNCATE
1277 && GET_MODE (XEXP (op, 0)) == mode
1278 && GET_CODE (XEXP (op, 0)) == MINUS
1279 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1280 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1281 return XEXP (op, 0);
1283 /* Extending a widening multiplication should be canonicalized to
1284 a wider widening multiplication. */
1285 if (GET_CODE (op) == MULT)
1287 rtx lhs = XEXP (op, 0);
1288 rtx rhs = XEXP (op, 1);
1289 enum rtx_code lcode = GET_CODE (lhs);
1290 enum rtx_code rcode = GET_CODE (rhs);
1292 /* Widening multiplies usually extend both operands, but sometimes
1293 they use a shift to extract a portion of a register. */
1294 if ((lcode == SIGN_EXTEND
1295 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1296 && (rcode == SIGN_EXTEND
1297 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1299 enum machine_mode lmode = GET_MODE (lhs);
1300 enum machine_mode rmode = GET_MODE (rhs);
1301 int bits;
1303 if (lcode == ASHIFTRT)
1304 /* Number of bits not shifted off the end. */
1305 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1306 else /* lcode == SIGN_EXTEND */
1307 /* Size of inner mode. */
1308 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1310 if (rcode == ASHIFTRT)
1311 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1312 else /* rcode == SIGN_EXTEND */
1313 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1315 /* We can only widen multiplies if the result is mathematically
1316 equivalent. I.e. if overflow was impossible. */
1317 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1318 return simplify_gen_binary
1319 (MULT, mode,
1320 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1321 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1325 /* Check for a sign extension of a subreg of a promoted
1326 variable, where the promotion is sign-extended, and the
1327 target mode is the same as the variable's promotion. */
1328 if (GET_CODE (op) == SUBREG
1329 && SUBREG_PROMOTED_VAR_P (op)
1330 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1331 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1333 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1334 if (temp)
1335 return temp;
1338 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1339 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1340 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1342 gcc_assert (GET_MODE_BITSIZE (mode)
1343 > GET_MODE_BITSIZE (GET_MODE (op)));
1344 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1345 GET_MODE (XEXP (op, 0)));
1348 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1349 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1350 GET_MODE_BITSIZE (N) - I bits.
1351 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1352 is similarly (zero_extend:M (subreg:O <X>)). */
1353 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1354 && GET_CODE (XEXP (op, 0)) == ASHIFT
1355 && CONST_INT_P (XEXP (op, 1))
1356 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1357 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1359 enum machine_mode tmode
1360 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1361 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1362 gcc_assert (GET_MODE_BITSIZE (mode)
1363 > GET_MODE_BITSIZE (GET_MODE (op)));
1364 if (tmode != BLKmode)
1366 rtx inner =
1367 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1368 if (inner)
1369 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1370 ? SIGN_EXTEND : ZERO_EXTEND,
1371 mode, inner, tmode);
1375 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1376 /* As we do not know which address space the pointer is referring to,
1377 we can do this only if the target does not support different pointer
1378 or address modes depending on the address space. */
1379 if (target_default_pointer_address_modes_p ()
1380 && ! POINTERS_EXTEND_UNSIGNED
1381 && mode == Pmode && GET_MODE (op) == ptr_mode
1382 && (CONSTANT_P (op)
1383 || (GET_CODE (op) == SUBREG
1384 && REG_P (SUBREG_REG (op))
1385 && REG_POINTER (SUBREG_REG (op))
1386 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1387 return convert_memory_address (Pmode, op);
1388 #endif
1389 break;
1391 case ZERO_EXTEND:
1392 /* Check for a zero extension of a subreg of a promoted
1393 variable, where the promotion is zero-extended, and the
1394 target mode is the same as the variable's promotion. */
1395 if (GET_CODE (op) == SUBREG
1396 && SUBREG_PROMOTED_VAR_P (op)
1397 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1398 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1400 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1401 if (temp)
1402 return temp;
1405 /* Extending a widening multiplication should be canonicalized to
1406 a wider widening multiplication. */
1407 if (GET_CODE (op) == MULT)
1409 rtx lhs = XEXP (op, 0);
1410 rtx rhs = XEXP (op, 1);
1411 enum rtx_code lcode = GET_CODE (lhs);
1412 enum rtx_code rcode = GET_CODE (rhs);
1414 /* Widening multiplies usually extend both operands, but sometimes
1415 they use a shift to extract a portion of a register. */
1416 if ((lcode == ZERO_EXTEND
1417 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1418 && (rcode == ZERO_EXTEND
1419 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1421 enum machine_mode lmode = GET_MODE (lhs);
1422 enum machine_mode rmode = GET_MODE (rhs);
1423 int bits;
1425 if (lcode == LSHIFTRT)
1426 /* Number of bits not shifted off the end. */
1427 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1428 else /* lcode == ZERO_EXTEND */
1429 /* Size of inner mode. */
1430 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1432 if (rcode == LSHIFTRT)
1433 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1434 else /* rcode == ZERO_EXTEND */
1435 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1437 /* We can only widen multiplies if the result is mathematically
1438 equivalent. I.e. if overflow was impossible. */
1439 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1440 return simplify_gen_binary
1441 (MULT, mode,
1442 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1443 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1447 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1448 if (GET_CODE (op) == ZERO_EXTEND)
1449 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1450 GET_MODE (XEXP (op, 0)));
1452 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1453 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1454 GET_MODE_BITSIZE (N) - I bits. */
1455 if (GET_CODE (op) == LSHIFTRT
1456 && GET_CODE (XEXP (op, 0)) == ASHIFT
1457 && CONST_INT_P (XEXP (op, 1))
1458 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1459 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1461 enum machine_mode tmode
1462 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1463 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1464 if (tmode != BLKmode)
1466 rtx inner =
1467 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1468 if (inner)
1469 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1473 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1474 /* As we do not know which address space the pointer is referring to,
1475 we can do this only if the target does not support different pointer
1476 or address modes depending on the address space. */
1477 if (target_default_pointer_address_modes_p ()
1478 && POINTERS_EXTEND_UNSIGNED > 0
1479 && mode == Pmode && GET_MODE (op) == ptr_mode
1480 && (CONSTANT_P (op)
1481 || (GET_CODE (op) == SUBREG
1482 && REG_P (SUBREG_REG (op))
1483 && REG_POINTER (SUBREG_REG (op))
1484 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1485 return convert_memory_address (Pmode, op);
1486 #endif
1487 break;
1489 default:
1490 break;
1493 return 0;
1496 /* Try to compute the value of a unary operation CODE whose output mode is to
1497 be MODE with input operand OP whose mode was originally OP_MODE.
1498 Return zero if the value cannot be computed. */
1500 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1501 rtx op, enum machine_mode op_mode)
1503 unsigned int width = GET_MODE_PRECISION (mode);
1504 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1506 if (code == VEC_DUPLICATE)
1508 gcc_assert (VECTOR_MODE_P (mode));
1509 if (GET_MODE (op) != VOIDmode)
1511 if (!VECTOR_MODE_P (GET_MODE (op)))
1512 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1513 else
1514 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1515 (GET_MODE (op)));
1517 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1518 || GET_CODE (op) == CONST_VECTOR)
1520 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1521 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1522 rtvec v = rtvec_alloc (n_elts);
1523 unsigned int i;
1525 if (GET_CODE (op) != CONST_VECTOR)
1526 for (i = 0; i < n_elts; i++)
1527 RTVEC_ELT (v, i) = op;
1528 else
1530 enum machine_mode inmode = GET_MODE (op);
1531 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1532 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1534 gcc_assert (in_n_elts < n_elts);
1535 gcc_assert ((n_elts % in_n_elts) == 0);
1536 for (i = 0; i < n_elts; i++)
1537 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1539 return gen_rtx_CONST_VECTOR (mode, v);
1543 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1545 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1546 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1547 enum machine_mode opmode = GET_MODE (op);
1548 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1549 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1550 rtvec v = rtvec_alloc (n_elts);
1551 unsigned int i;
1553 gcc_assert (op_n_elts == n_elts);
1554 for (i = 0; i < n_elts; i++)
1556 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1557 CONST_VECTOR_ELT (op, i),
1558 GET_MODE_INNER (opmode));
1559 if (!x)
1560 return 0;
1561 RTVEC_ELT (v, i) = x;
1563 return gen_rtx_CONST_VECTOR (mode, v);
1566 /* The order of these tests is critical so that, for example, we don't
1567 check the wrong mode (input vs. output) for a conversion operation,
1568 such as FIX. At some point, this should be simplified. */
1570 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1572 HOST_WIDE_INT hv, lv;
1573 REAL_VALUE_TYPE d;
1575 if (CONST_INT_P (op))
1576 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1577 else
1578 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1580 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1581 d = real_value_truncate (mode, d);
1582 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1584 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1586 HOST_WIDE_INT hv, lv;
1587 REAL_VALUE_TYPE d;
1589 if (CONST_INT_P (op))
1590 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1591 else
1592 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1594 if (op_mode == VOIDmode
1595 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1596 /* We should never get a negative number. */
1597 gcc_assert (hv >= 0);
1598 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1599 hv = 0, lv &= GET_MODE_MASK (op_mode);
1601 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1602 d = real_value_truncate (mode, d);
1603 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1606 if (CONST_INT_P (op)
1607 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1609 HOST_WIDE_INT arg0 = INTVAL (op);
1610 HOST_WIDE_INT val;
1612 switch (code)
1614 case NOT:
1615 val = ~ arg0;
1616 break;
1618 case NEG:
1619 val = - arg0;
1620 break;
1622 case ABS:
1623 val = (arg0 >= 0 ? arg0 : - arg0);
1624 break;
1626 case FFS:
1627 arg0 &= GET_MODE_MASK (mode);
1628 val = ffs_hwi (arg0);
1629 break;
1631 case CLZ:
1632 arg0 &= GET_MODE_MASK (mode);
1633 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1634 ;
1635 else
1636 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1637 break;
1639 case CLRSB:
1640 arg0 &= GET_MODE_MASK (mode);
1641 if (arg0 == 0)
1642 val = GET_MODE_PRECISION (mode) - 1;
1643 else if (arg0 >= 0)
1644 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1645 else if (arg0 < 0)
1646 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1647 break;
1649 case CTZ:
1650 arg0 &= GET_MODE_MASK (mode);
1651 if (arg0 == 0)
1653 /* Even if the value at zero is undefined, we have to come
1654 up with some replacement. Seems good enough. */
1655 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1656 val = GET_MODE_PRECISION (mode);
1658 else
1659 val = ctz_hwi (arg0);
1660 break;
1662 case POPCOUNT:
1663 arg0 &= GET_MODE_MASK (mode);
1664 val = 0;
1665 while (arg0)
1666 val++, arg0 &= arg0 - 1;
1667 break;
1669 case PARITY:
1670 arg0 &= GET_MODE_MASK (mode);
1671 val = 0;
1672 while (arg0)
1673 val++, arg0 &= arg0 - 1;
1674 val &= 1;
1675 break;
1677 case BSWAP:
1679 unsigned int s;
1681 val = 0;
1682 for (s = 0; s < width; s += 8)
1684 unsigned int d = width - s - 8;
1685 unsigned HOST_WIDE_INT byte;
1686 byte = (arg0 >> s) & 0xff;
1687 val |= byte << d;
1690 break;
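/* E.g. (illustrative, not in the original source): for 32-bit SImode,
   the loop above maps arg0 = 0x12345678 to val = 0x78563412 by moving
   the byte at shift S to shift WIDTH - S - 8.  */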
1692 case TRUNCATE:
1693 val = arg0;
1694 break;
1696 case ZERO_EXTEND:
1697 /* When zero-extending a CONST_INT, we need to know its
1698 original mode. */
1699 gcc_assert (op_mode != VOIDmode);
1700 if (op_width == HOST_BITS_PER_WIDE_INT)
1702 /* If we were really extending the mode,
1703 we would have to distinguish between zero-extension
1704 and sign-extension. */
1705 gcc_assert (width == op_width);
1706 val = arg0;
1708 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1709 val = arg0 & GET_MODE_MASK (op_mode);
1710 else
1711 return 0;
1712 break;
1714 case SIGN_EXTEND:
1715 if (op_mode == VOIDmode)
1716 op_mode = mode;
1717 op_width = GET_MODE_PRECISION (op_mode);
1718 if (op_width == HOST_BITS_PER_WIDE_INT)
1720 /* If we were really extending the mode,
1721 we would have to distinguish between zero-extension
1722 and sign-extension. */
1723 gcc_assert (width == op_width);
1724 val = arg0;
1726 else if (op_width < HOST_BITS_PER_WIDE_INT)
1728 val = arg0 & GET_MODE_MASK (op_mode);
1729 if (val_signbit_known_set_p (op_mode, val))
1730 val |= ~GET_MODE_MASK (op_mode);
1732 else
1733 return 0;
1734 break;
1736 case SQRT:
1737 case FLOAT_EXTEND:
1738 case FLOAT_TRUNCATE:
1739 case SS_TRUNCATE:
1740 case US_TRUNCATE:
1741 case SS_NEG:
1742 case US_NEG:
1743 case SS_ABS:
1744 return 0;
1746 default:
1747 gcc_unreachable ();
1750 return gen_int_mode (val, mode);
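/* Worked example for the switch above (illustrative, not in the
   original source): folding (clz:SI (const_int 1)) computes
   floor_log2 (1) == 0, so val = 32 - 0 - 1 = 31 and the result is
   (const_int 31).  */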
1753 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1754 for a DImode operation on a CONST_INT. */
1755 else if (width <= HOST_BITS_PER_DOUBLE_INT
1756 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1758 double_int first, value;
1760 if (CONST_DOUBLE_AS_INT_P (op))
1761 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1762 CONST_DOUBLE_LOW (op));
1763 else
1764 first = double_int::from_shwi (INTVAL (op));
1766 switch (code)
1768 case NOT:
1769 value = ~first;
1770 break;
1772 case NEG:
1773 value = -first;
1774 break;
1776 case ABS:
1777 if (first.is_negative ())
1778 value = -first;
1779 else
1780 value = first;
1781 break;
1783 case FFS:
1784 value.high = 0;
1785 if (first.low != 0)
1786 value.low = ffs_hwi (first.low);
1787 else if (first.high != 0)
1788 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1789 else
1790 value.low = 0;
1791 break;
1793 case CLZ:
1794 value.high = 0;
1795 if (first.high != 0)
1796 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1797 - HOST_BITS_PER_WIDE_INT;
1798 else if (first.low != 0)
1799 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1800 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1801 value.low = GET_MODE_PRECISION (mode);
1802 break;
1804 case CTZ:
1805 value.high = 0;
1806 if (first.low != 0)
1807 value.low = ctz_hwi (first.low);
1808 else if (first.high != 0)
1809 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1810 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1811 value.low = GET_MODE_PRECISION (mode);
1812 break;
1814 case POPCOUNT:
1815 value = double_int_zero;
1816 while (first.low)
1818 value.low++;
1819 first.low &= first.low - 1;
1821 while (first.high)
1823 value.low++;
1824 first.high &= first.high - 1;
1826 break;
1828 case PARITY:
1829 value = double_int_zero;
1830 while (first.low)
1832 value.low++;
1833 first.low &= first.low - 1;
1835 while (first.high)
1837 value.low++;
1838 first.high &= first.high - 1;
1840 value.low &= 1;
1841 break;
1843 case BSWAP:
1845 unsigned int s;
1847 value = double_int_zero;
1848 for (s = 0; s < width; s += 8)
1850 unsigned int d = width - s - 8;
1851 unsigned HOST_WIDE_INT byte;
1853 if (s < HOST_BITS_PER_WIDE_INT)
1854 byte = (first.low >> s) & 0xff;
1855 else
1856 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1858 if (d < HOST_BITS_PER_WIDE_INT)
1859 value.low |= byte << d;
1860 else
1861 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1864 break;
1866 case TRUNCATE:
1867 /* This is just a change-of-mode, so do nothing. */
1868 value = first;
1869 break;
1871 case ZERO_EXTEND:
1872 gcc_assert (op_mode != VOIDmode);
1874 if (op_width > HOST_BITS_PER_WIDE_INT)
1875 return 0;
1877 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1878 break;
1880 case SIGN_EXTEND:
1881 if (op_mode == VOIDmode
1882 || op_width > HOST_BITS_PER_WIDE_INT)
1883 return 0;
1884 else
1886 value.low = first.low & GET_MODE_MASK (op_mode);
1887 if (val_signbit_known_set_p (op_mode, value.low))
1888 value.low |= ~GET_MODE_MASK (op_mode);
1890 value.high = HWI_SIGN_EXTEND (value.low);
1892 break;
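/* Editor's note (illustrative, not in the original source):
   sign-extending the QImode value 0x80 sets every bit above bit 7
   in value.low, yielding -128, and HWI_SIGN_EXTEND then makes
   value.high all-ones.  */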
1894 case SQRT:
1895 return 0;
1897 default:
1898 return 0;
1901 return immed_double_int_const (value, mode);
1904 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1905 && SCALAR_FLOAT_MODE_P (mode)
1906 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1908 REAL_VALUE_TYPE d, t;
1909 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1911 switch (code)
1913 case SQRT:
1914 if (HONOR_SNANS (mode) && real_isnan (&d))
1915 return 0;
1916 real_sqrt (&t, mode, &d);
1917 d = t;
1918 break;
1919 case ABS:
1920 d = real_value_abs (&d);
1921 break;
1922 case NEG:
1923 d = real_value_negate (&d);
1924 break;
1925 case FLOAT_TRUNCATE:
1926 d = real_value_truncate (mode, d);
1927 break;
1928 case FLOAT_EXTEND:
1929 /* All this does is change the mode, unless the mode
1930 class changes too. */
1931 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1932 real_convert (&d, mode, &d);
1933 break;
1934 case FIX:
1935 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1936 break;
1937 case NOT:
1939 long tmp[4];
1940 int i;
1942 real_to_target (tmp, &d, GET_MODE (op));
1943 for (i = 0; i < 4; i++)
1944 tmp[i] = ~tmp[i];
1945 real_from_target (&d, tmp, mode);
1946 break;
1948 default:
1949 gcc_unreachable ();
1951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1954 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1955 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1956 && GET_MODE_CLASS (mode) == MODE_INT
1957 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1959 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1960 operators are intentionally left unspecified (to ease implementation
1961 by target backends), for consistency, this routine implements the
1962 same semantics for constant folding as used by the middle-end. */
1964 /* This was formerly used only for non-IEEE float.
1965 eggert@twinsun.com says it is safe for IEEE also. */
1966 HOST_WIDE_INT xh, xl, th, tl;
1967 REAL_VALUE_TYPE x, t;
1968 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1969 switch (code)
1971 case FIX:
1972 if (REAL_VALUE_ISNAN (x))
1973 return const0_rtx;
1975 /* Test against the signed upper bound. */
1976 if (width > HOST_BITS_PER_WIDE_INT)
1978 th = ((unsigned HOST_WIDE_INT) 1
1979 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1980 tl = -1;
1982 else
1984 th = 0;
1985 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1987 real_from_integer (&t, VOIDmode, tl, th, 0);
1988 if (REAL_VALUES_LESS (t, x))
1990 xh = th;
1991 xl = tl;
1992 break;
1995 /* Test against the signed lower bound. */
1996 if (width > HOST_BITS_PER_WIDE_INT)
1998 th = (unsigned HOST_WIDE_INT) (-1)
1999 << (width - HOST_BITS_PER_WIDE_INT - 1);
2000 tl = 0;
2002 else
2004 th = -1;
2005 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2007 real_from_integer (&t, VOIDmode, tl, th, 0);
2008 if (REAL_VALUES_LESS (x, t))
2010 xh = th;
2011 xl = tl;
2012 break;
2014 REAL_VALUE_TO_INT (&xl, &xh, x);
2015 break;
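/* Editor's note (illustrative, not in the original source): folding
   (fix:SI 3.0e9) exceeds the signed upper bound 0x7fffffff computed
   above, so the result saturates to that bound rather than wrapping,
   matching the middle-end's constant-folding semantics.  */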
2017 case UNSIGNED_FIX:
2018 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2019 return const0_rtx;
2021 /* Test against the unsigned upper bound. */
2022 if (width == HOST_BITS_PER_DOUBLE_INT)
2024 th = -1;
2025 tl = -1;
2027 else if (width >= HOST_BITS_PER_WIDE_INT)
2029 th = ((unsigned HOST_WIDE_INT) 1
2030 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2031 tl = -1;
2033 else
2035 th = 0;
2036 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2038 real_from_integer (&t, VOIDmode, tl, th, 1);
2039 if (REAL_VALUES_LESS (t, x))
2041 xh = th;
2042 xl = tl;
2043 break;
2046 REAL_VALUE_TO_INT (&xl, &xh, x);
2047 break;
2049 default:
2050 gcc_unreachable ();
2052 return immed_double_const (xl, xh, mode);
2055 return NULL_RTX;
2058 /* Subroutine of simplify_binary_operation to simplify a binary operation
2059 CODE that can commute with byte swapping, with result mode MODE and
2060 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2061 Return zero if no simplification or canonicalization is possible. */
2063 static rtx
2064 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2065 rtx op0, rtx op1)
2067 rtx tem;
2069 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2070 if (GET_CODE (op0) == BSWAP
2071 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1)))
2073 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2074 simplify_gen_unary (BSWAP, mode, op1, mode));
2075 return simplify_gen_unary (BSWAP, mode, tem, mode);
2078 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2079 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2081 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2082 return simplify_gen_unary (BSWAP, mode, tem, mode);
2085 return NULL_RTX;
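/* Editor's illustrative example (not from the original source): in
   SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), because AND commutes with
   the byte permutation and BSWAP of 0xff is 0xff000000.  */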
2088 /* Subroutine of simplify_binary_operation to simplify a commutative,
2089 associative binary operation CODE with result mode MODE, operating
2090 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2091 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2092 canonicalization is possible. */
2094 static rtx
2095 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2096 rtx op0, rtx op1)
2098 rtx tem;
2100 /* Linearize the operator to the left. */
2101 if (GET_CODE (op1) == code)
2103 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2104 if (GET_CODE (op0) == code)
2106 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2107 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2110 /* "a op (b op c)" becomes "(b op c) op a". */
2111 if (! swap_commutative_operands_p (op1, op0))
2112 return simplify_gen_binary (code, mode, op1, op0);
2114 tem = op0;
2115 op0 = op1;
2116 op1 = tem;
2119 if (GET_CODE (op0) == code)
2121 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2122 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2124 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2125 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2128 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2129 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2130 if (tem != 0)
2131 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2133 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2134 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2135 if (tem != 0)
2136 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2139 return 0;
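/* Editor's illustrative example (not from the original source):
   (plus a (plus b c)) is first rewritten as (plus (plus b c) a),
   and (plus (plus x 1) y) becomes (plus (plus x y) 1), keeping
   constants and other "heavier" operands toward the right.  */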
2143 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2144 and OP1. Return 0 if no simplification is possible.
2146 Don't use this for relational operations such as EQ or LT.
2147 Use simplify_relational_operation instead. */
2148 rtx
2149 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2150 rtx op0, rtx op1)
2152 rtx trueop0, trueop1;
2153 rtx tem;
2155 /* Relational operations don't work here. We must know the mode
2156 of the operands in order to do the comparison correctly.
2157 Assuming a full word can give incorrect results.
2158 Consider comparing 128 with -128 in QImode. */
2159 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2160 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2162 /* Make sure the constant is second. */
2163 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2164 && swap_commutative_operands_p (op0, op1))
2166 tem = op0, op0 = op1, op1 = tem;
2169 trueop0 = avoid_constant_pool_reference (op0);
2170 trueop1 = avoid_constant_pool_reference (op1);
2172 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2173 if (tem)
2174 return tem;
2175 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2178 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2179 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2180 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2181 actual constants. */
2183 static rtx
2184 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2185 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2187 rtx tem, reversed, opleft, opright;
2188 HOST_WIDE_INT val;
2189 unsigned int width = GET_MODE_PRECISION (mode);
2191 /* Even if we can't compute a constant result,
2192 there are some cases worth simplifying. */
2194 switch (code)
2196 case PLUS:
2197 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2198 when x is NaN, infinite, or finite and nonzero. They aren't
2199 when x is -0 and the rounding mode is not towards -infinity,
2200 since (-0) + 0 is then 0. */
2201 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2202 return op0;
2204 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2205 transformations are safe even for IEEE. */
2206 if (GET_CODE (op0) == NEG)
2207 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2208 else if (GET_CODE (op1) == NEG)
2209 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2211 /* (~a) + 1 -> -a */
2212 if (INTEGRAL_MODE_P (mode)
2213 && GET_CODE (op0) == NOT
2214 && trueop1 == const1_rtx)
2215 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2217 /* Handle both-operands-constant cases. We can only add
2218 CONST_INTs to constants since the sum of relocatable symbols
2219 can't be handled by most assemblers. Don't add CONST_INT
2220 to CONST_INT since overflow won't be computed properly if wider
2221 than HOST_BITS_PER_WIDE_INT. */
2223 if ((GET_CODE (op0) == CONST
2224 || GET_CODE (op0) == SYMBOL_REF
2225 || GET_CODE (op0) == LABEL_REF)
2226 && CONST_INT_P (op1))
2227 return plus_constant (mode, op0, INTVAL (op1));
2228 else if ((GET_CODE (op1) == CONST
2229 || GET_CODE (op1) == SYMBOL_REF
2230 || GET_CODE (op1) == LABEL_REF)
2231 && CONST_INT_P (op0))
2232 return plus_constant (mode, op1, INTVAL (op0));
2234 /* See if this is something like X * C + X or vice versa or
2235 if the multiplication is written as a shift. If so, we can
2236 distribute and make a new multiply or shift that computes
2237 X * (C + 1) in the example above. But don't make
2238 something more expensive than we had before. */
2240 if (SCALAR_INT_MODE_P (mode))
2242 double_int coeff0, coeff1;
2243 rtx lhs = op0, rhs = op1;
2245 coeff0 = double_int_one;
2246 coeff1 = double_int_one;
2248 if (GET_CODE (lhs) == NEG)
2250 coeff0 = double_int_minus_one;
2251 lhs = XEXP (lhs, 0);
2253 else if (GET_CODE (lhs) == MULT
2254 && CONST_INT_P (XEXP (lhs, 1)))
2256 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2257 lhs = XEXP (lhs, 0);
2259 else if (GET_CODE (lhs) == ASHIFT
2260 && CONST_INT_P (XEXP (lhs, 1))
2261 && INTVAL (XEXP (lhs, 1)) >= 0
2262 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2264 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2265 lhs = XEXP (lhs, 0);
2268 if (GET_CODE (rhs) == NEG)
2270 coeff1 = double_int_minus_one;
2271 rhs = XEXP (rhs, 0);
2273 else if (GET_CODE (rhs) == MULT
2274 && CONST_INT_P (XEXP (rhs, 1)))
2276 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2277 rhs = XEXP (rhs, 0);
2279 else if (GET_CODE (rhs) == ASHIFT
2280 && CONST_INT_P (XEXP (rhs, 1))
2281 && INTVAL (XEXP (rhs, 1)) >= 0
2282 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2284 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2285 rhs = XEXP (rhs, 0);
2288 if (rtx_equal_p (lhs, rhs))
2290 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2291 rtx coeff;
2292 double_int val;
2293 bool speed = optimize_function_for_speed_p (cfun);
2295 val = coeff0 + coeff1;
2296 coeff = immed_double_int_const (val, mode);
2298 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2299 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2300 ? tem : 0;
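/* Editor's illustrative example (not from the original source): for
   (plus (mult x 4) x), coeff0 == 4 and coeff1 == 1, so the sum
   folds to (mult x 5); (plus (ashift x 2) x) is handled the same
   way via the set_bit path.  The fold is kept only when it is no
   more costly than the original RTL.  */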
2304 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2305 if (CONST_SCALAR_INT_P (op1)
2306 && GET_CODE (op0) == XOR
2307 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2308 && mode_signbit_p (mode, op1))
2309 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2310 simplify_gen_binary (XOR, mode, op1,
2311 XEXP (op0, 1)));
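/* Editor's note (not in the original source): adding the sign bit
   modulo 2**width is the same as XORing with it, which is why the
   two constants above can be merged into the single mask C1^C2.  */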
2313 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2314 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2315 && GET_CODE (op0) == MULT
2316 && GET_CODE (XEXP (op0, 0)) == NEG)
2318 rtx in1, in2;
2320 in1 = XEXP (XEXP (op0, 0), 0);
2321 in2 = XEXP (op0, 1);
2322 return simplify_gen_binary (MINUS, mode, op1,
2323 simplify_gen_binary (MULT, mode,
2324 in1, in2));
2327 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2328 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2329 is 1. */
2330 if (COMPARISON_P (op0)
2331 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2332 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2333 && (reversed = reversed_comparison (op0, mode)))
2334 return
2335 simplify_gen_unary (NEG, mode, reversed, mode);
2337 /* If one of the operands is a PLUS or a MINUS, see if we can
2338 simplify this by the associative law.
2339 Don't use the associative law for floating point.
2340 The inaccuracy makes it nonassociative,
2341 and subtle programs can break if operations are associated. */
2343 if (INTEGRAL_MODE_P (mode)
2344 && (plus_minus_operand_p (op0)
2345 || plus_minus_operand_p (op1))
2346 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2347 return tem;
2349 /* Reassociate floating point addition only when the user
2350 specifies associative math operations. */
2351 if (FLOAT_MODE_P (mode)
2352 && flag_associative_math)
2354 tem = simplify_associative_operation (code, mode, op0, op1);
2355 if (tem)
2356 return tem;
2358 break;
2360 case COMPARE:
2361 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2362 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2363 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2364 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2366 rtx xop00 = XEXP (op0, 0);
2367 rtx xop10 = XEXP (op1, 0);
2369 #ifdef HAVE_cc0
2370 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2371 #else
2372 if (REG_P (xop00) && REG_P (xop10)
2373 && GET_MODE (xop00) == GET_MODE (xop10)
2374 && REGNO (xop00) == REGNO (xop10)
2375 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2376 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2377 #endif
2378 return xop00;
2380 break;
2382 case MINUS:
2383 /* We can't assume x-x is 0 even with non-IEEE floating point,
2384 but since it is zero except in very strange circumstances, we
2385 will treat it as zero with -ffinite-math-only. */
2386 if (rtx_equal_p (trueop0, trueop1)
2387 && ! side_effects_p (op0)
2388 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2389 return CONST0_RTX (mode);
2391 /* Change subtraction from zero into negation. (0 - x) is the
2392 same as -x when x is NaN, infinite, or finite and nonzero.
2393 But if the mode has signed zeros, and does not round towards
2394 -infinity, then 0 - 0 is 0, not -0. */
2395 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2396 return simplify_gen_unary (NEG, mode, op1, mode);
2398 /* (-1 - a) is ~a. */
2399 if (trueop0 == constm1_rtx)
2400 return simplify_gen_unary (NOT, mode, op1, mode);
2402 /* Subtracting 0 has no effect unless the mode has signed zeros
2403 and supports rounding towards -infinity. In such a case,
2404 0 - 0 is -0. */
2405 if (!(HONOR_SIGNED_ZEROS (mode)
2406 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2407 && trueop1 == CONST0_RTX (mode))
2408 return op0;
2410 /* See if this is something like X * C - X or vice versa or
2411 if the multiplication is written as a shift. If so, we can
2412 distribute and make a new multiply, shift, or maybe just
2413 have X (if C is 2 in the example above). But don't make
2414 something more expensive than we had before. */
2416 if (SCALAR_INT_MODE_P (mode))
2418 double_int coeff0, negcoeff1;
2419 rtx lhs = op0, rhs = op1;
2421 coeff0 = double_int_one;
2422 negcoeff1 = double_int_minus_one;
2424 if (GET_CODE (lhs) == NEG)
2426 coeff0 = double_int_minus_one;
2427 lhs = XEXP (lhs, 0);
2429 else if (GET_CODE (lhs) == MULT
2430 && CONST_INT_P (XEXP (lhs, 1)))
2432 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2433 lhs = XEXP (lhs, 0);
2435 else if (GET_CODE (lhs) == ASHIFT
2436 && CONST_INT_P (XEXP (lhs, 1))
2437 && INTVAL (XEXP (lhs, 1)) >= 0
2438 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2440 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2441 lhs = XEXP (lhs, 0);
2444 if (GET_CODE (rhs) == NEG)
2446 negcoeff1 = double_int_one;
2447 rhs = XEXP (rhs, 0);
2449 else if (GET_CODE (rhs) == MULT
2450 && CONST_INT_P (XEXP (rhs, 1)))
2452 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2453 rhs = XEXP (rhs, 0);
2455 else if (GET_CODE (rhs) == ASHIFT
2456 && CONST_INT_P (XEXP (rhs, 1))
2457 && INTVAL (XEXP (rhs, 1)) >= 0
2458 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2460 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2461 negcoeff1 = -negcoeff1;
2462 rhs = XEXP (rhs, 0);
2465 if (rtx_equal_p (lhs, rhs))
2467 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2468 rtx coeff;
2469 double_int val;
2470 bool speed = optimize_function_for_speed_p (cfun);
2472 val = coeff0 + negcoeff1;
2473 coeff = immed_double_int_const (val, mode);
2475 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2476 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2477 ? tem : 0;
2481 /* (a - (-b)) -> (a + b). True even for IEEE. */
2482 if (GET_CODE (op1) == NEG)
2483 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2485 /* (-x - c) may be simplified as (-c - x). */
2486 if (GET_CODE (op0) == NEG
2487 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2489 tem = simplify_unary_operation (NEG, mode, op1, mode);
2490 if (tem)
2491 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2494 /* Don't let a relocatable value get a negative coeff. */
2495 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2496 return simplify_gen_binary (PLUS, mode,
2497 op0,
2498 neg_const_int (mode, op1));
2500 /* (x - (x & y)) -> (x & ~y) */
2501 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2503 if (rtx_equal_p (op0, XEXP (op1, 0)))
2505 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2506 GET_MODE (XEXP (op1, 1)));
2507 return simplify_gen_binary (AND, mode, op0, tem);
2509 if (rtx_equal_p (op0, XEXP (op1, 1)))
2511 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2512 GET_MODE (XEXP (op1, 0)));
2513 return simplify_gen_binary (AND, mode, op0, tem);
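/* Editor's note (not in the original source): the fold above is
   exact because x & y contains only bits already set in x, so the
   subtraction never borrows; e.g. 13 - (13 & 6) == 9 == 13 & ~6.  */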
2517 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2518 by reversing the comparison code if valid. */
2519 if (STORE_FLAG_VALUE == 1
2520 && trueop0 == const1_rtx
2521 && COMPARISON_P (op1)
2522 && (reversed = reversed_comparison (op1, mode)))
2523 return reversed;
2525 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2526 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2527 && GET_CODE (op1) == MULT
2528 && GET_CODE (XEXP (op1, 0)) == NEG)
2530 rtx in1, in2;
2532 in1 = XEXP (XEXP (op1, 0), 0);
2533 in2 = XEXP (op1, 1);
2534 return simplify_gen_binary (PLUS, mode,
2535 simplify_gen_binary (MULT, mode,
2536 in1, in2),
2537 op0);
2540 /* Canonicalize (minus (neg A) (mult B C)) to
2541 (minus (mult (neg B) C) A). */
2542 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2543 && GET_CODE (op1) == MULT
2544 && GET_CODE (op0) == NEG)
2546 rtx in1, in2;
2548 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2549 in2 = XEXP (op1, 1);
2550 return simplify_gen_binary (MINUS, mode,
2551 simplify_gen_binary (MULT, mode,
2552 in1, in2),
2553 XEXP (op0, 0));
2556 /* If one of the operands is a PLUS or a MINUS, see if we can
2557 simplify this by the associative law. This will, for example,
2558 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2559 Don't use the associative law for floating point.
2560 The inaccuracy makes it nonassociative,
2561 and subtle programs can break if operations are associated. */
2563 if (INTEGRAL_MODE_P (mode)
2564 && (plus_minus_operand_p (op0)
2565 || plus_minus_operand_p (op1))
2566 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2567 return tem;
2568 break;
2570 case MULT:
2571 if (trueop1 == constm1_rtx)
2572 return simplify_gen_unary (NEG, mode, op0, mode);
2574 if (GET_CODE (op0) == NEG)
2576 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2577 /* If op1 is a MULT as well and simplify_unary_operation
2578 just moved the NEG to the second operand, simplify_gen_binary
2579 below could, through simplify_associative_operation, move
2580 the NEG around again and recurse endlessly. */
2581 if (temp
2582 && GET_CODE (op1) == MULT
2583 && GET_CODE (temp) == MULT
2584 && XEXP (op1, 0) == XEXP (temp, 0)
2585 && GET_CODE (XEXP (temp, 1)) == NEG
2586 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2587 temp = NULL_RTX;
2588 if (temp)
2589 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2591 if (GET_CODE (op1) == NEG)
2593 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2594 /* If op0 is a MULT as well and simplify_unary_operation
2595 just moved the NEG to the second operand, simplify_gen_binary
2596 below could, through simplify_associative_operation, move
2597 the NEG around again and recurse endlessly. */
2598 if (temp
2599 && GET_CODE (op0) == MULT
2600 && GET_CODE (temp) == MULT
2601 && XEXP (op0, 0) == XEXP (temp, 0)
2602 && GET_CODE (XEXP (temp, 1)) == NEG
2603 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2604 temp = NULL_RTX;
2605 if (temp)
2606 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2609 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2610 x is NaN, since x * 0 is then also NaN. Nor is it valid
2611 when the mode has signed zeros, since multiplying a negative
2612 number by 0 will give -0, not 0. */
2613 if (!HONOR_NANS (mode)
2614 && !HONOR_SIGNED_ZEROS (mode)
2615 && trueop1 == CONST0_RTX (mode)
2616 && ! side_effects_p (op0))
2617 return op1;
2619 /* In IEEE floating point, x*1 is not equivalent to x for
2620 signalling NaNs. */
2621 if (!HONOR_SNANS (mode)
2622 && trueop1 == CONST1_RTX (mode))
2623 return op0;
2625 /* Convert multiply by constant power of two into shift unless
2626 we are still generating RTL. This test is a kludge. */
2627 if (CONST_INT_P (trueop1)
2628 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2629 /* If the mode is larger than the host word size, and the
2630 uppermost bit is set, then this isn't a power of two due
2631 to implicit sign extension. */
2632 && (width <= HOST_BITS_PER_WIDE_INT
2633 || val != HOST_BITS_PER_WIDE_INT - 1))
2634 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
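/* Editor's illustrative example (not from the original source):
   (mult x (const_int 8)) becomes (ashift x (const_int 3)).  The
   extra width test rejects a host word with only its sign bit set,
   since in a wider mode that constant is sign-extended and is not
   a power of two.  */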
2636 /* Likewise for multipliers wider than a word. */
2637 if (CONST_DOUBLE_AS_INT_P (trueop1)
2638 && GET_MODE (op0) == mode
2639 && CONST_DOUBLE_LOW (trueop1) == 0
2640 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2641 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2642 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2643 return simplify_gen_binary (ASHIFT, mode, op0,
2644 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2646 /* x*2 is x+x and x*(-1) is -x */
2647 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2648 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2649 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2650 && GET_MODE (op0) == mode)
2652 REAL_VALUE_TYPE d;
2653 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2655 if (REAL_VALUES_EQUAL (d, dconst2))
2656 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2658 if (!HONOR_SNANS (mode)
2659 && REAL_VALUES_EQUAL (d, dconstm1))
2660 return simplify_gen_unary (NEG, mode, op0, mode);
2663 /* Optimize -x * -x as x * x. */
2664 if (FLOAT_MODE_P (mode)
2665 && GET_CODE (op0) == NEG
2666 && GET_CODE (op1) == NEG
2667 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2668 && !side_effects_p (XEXP (op0, 0)))
2669 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2671 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2672 if (SCALAR_FLOAT_MODE_P (mode)
2673 && GET_CODE (op0) == ABS
2674 && GET_CODE (op1) == ABS
2675 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2676 && !side_effects_p (XEXP (op0, 0)))
2677 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2679 /* Reassociate multiplication, but for floating point MULTs
2680 only when the user specifies unsafe math optimizations. */
2681 if (! FLOAT_MODE_P (mode)
2682 || flag_unsafe_math_optimizations)
2684 tem = simplify_associative_operation (code, mode, op0, op1);
2685 if (tem)
2686 return tem;
2688 break;
2690 case IOR:
2691 if (trueop1 == CONST0_RTX (mode))
2692 return op0;
2693 if (INTEGRAL_MODE_P (mode)
2694 && trueop1 == CONSTM1_RTX (mode)
2695 && !side_effects_p (op0))
2696 return op1;
2697 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2698 return op0;
2699 /* A | (~A) -> -1 */
2700 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2701 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2702 && ! side_effects_p (op0)
2703 && SCALAR_INT_MODE_P (mode))
2704 return constm1_rtx;
2706 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2707 if (CONST_INT_P (op1)
2708 && HWI_COMPUTABLE_MODE_P (mode)
2709 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2710 && !side_effects_p (op0))
2711 return op1;
2713 /* Canonicalize (X & C1) | C2. */
2714 if (GET_CODE (op0) == AND
2715 && CONST_INT_P (trueop1)
2716 && CONST_INT_P (XEXP (op0, 1)))
2718 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2719 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2720 HOST_WIDE_INT c2 = INTVAL (trueop1);
2722 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2723 if ((c1 & c2) == c1
2724 && !side_effects_p (XEXP (op0, 0)))
2725 return trueop1;
2727 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2728 if (((c1|c2) & mask) == mask)
2729 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2731 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2732 if (((c1 & ~c2) & mask) != (c1 & mask))
2734 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2735 gen_int_mode (c1 & ~c2, mode));
2736 return simplify_gen_binary (IOR, mode, tem, op1);
2740 /* Convert (A & B) | A to A. */
2741 if (GET_CODE (op0) == AND
2742 && (rtx_equal_p (XEXP (op0, 0), op1)
2743 || rtx_equal_p (XEXP (op0, 1), op1))
2744 && ! side_effects_p (XEXP (op0, 0))
2745 && ! side_effects_p (XEXP (op0, 1)))
2746 return op1;
2748 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2749 mode size to (rotate A CX). */
2751 if (GET_CODE (op1) == ASHIFT
2752 || GET_CODE (op1) == SUBREG)
2754 opleft = op1;
2755 opright = op0;
2757 else
2759 opright = op1;
2760 opleft = op0;
2763 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2764 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2765 && CONST_INT_P (XEXP (opleft, 1))
2766 && CONST_INT_P (XEXP (opright, 1))
2767 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2768 == GET_MODE_PRECISION (mode)))
2769 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
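/* Editor's illustrative example (not from the original source): in
   SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   has shift counts summing to 32, so it becomes
   (rotate x (const_int 24)).  */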
2771 /* Same, but for ashift that has been "simplified" to a wider mode
2772 by simplify_shift_const. */
2774 if (GET_CODE (opleft) == SUBREG
2775 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2776 && GET_CODE (opright) == LSHIFTRT
2777 && GET_CODE (XEXP (opright, 0)) == SUBREG
2778 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2779 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2780 && (GET_MODE_SIZE (GET_MODE (opleft))
2781 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2782 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2783 SUBREG_REG (XEXP (opright, 0)))
2784 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2785 && CONST_INT_P (XEXP (opright, 1))
2786 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2787 == GET_MODE_PRECISION (mode)))
2788 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2789 XEXP (SUBREG_REG (opleft), 1));
2791 /* If we have (ior (and X C1) C2), simplify this by making
2792 C1 as small as possible if C1 actually changes. */
2793 if (CONST_INT_P (op1)
2794 && (HWI_COMPUTABLE_MODE_P (mode)
2795 || INTVAL (op1) > 0)
2796 && GET_CODE (op0) == AND
2797 && CONST_INT_P (XEXP (op0, 1))
2798 && CONST_INT_P (op1)
2799 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2800 return simplify_gen_binary (IOR, mode,
2801 simplify_gen_binary
2802 (AND, mode, XEXP (op0, 0),
2803 GEN_INT (UINTVAL (XEXP (op0, 1))
2804 & ~UINTVAL (op1))),
2805 op1);
2807 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2808 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2809 the PLUS does not affect any of the bits in OP1: then we can do
2810 the IOR as a PLUS and we can associate. This is valid if OP1
2811 can be safely shifted left C bits. */
2812 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2813 && GET_CODE (XEXP (op0, 0)) == PLUS
2814 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2815 && CONST_INT_P (XEXP (op0, 1))
2816 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2818 int count = INTVAL (XEXP (op0, 1));
2819 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2821 if (mask >> count == INTVAL (trueop1)
2822 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2823 return simplify_gen_binary (ASHIFTRT, mode,
2824 plus_constant (mode, XEXP (op0, 0),
2825 mask),
2826 XEXP (op0, 1));
2829 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2830 if (tem)
2831 return tem;
2833 tem = simplify_associative_operation (code, mode, op0, op1);
2834 if (tem)
2835 return tem;
2836 break;
2838 case XOR:
2839 if (trueop1 == CONST0_RTX (mode))
2840 return op0;
2841 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2842 return simplify_gen_unary (NOT, mode, op0, mode);
2843 if (rtx_equal_p (trueop0, trueop1)
2844 && ! side_effects_p (op0)
2845 && GET_MODE_CLASS (mode) != MODE_CC)
2846 return CONST0_RTX (mode);
2848 /* Canonicalize XOR of the most significant bit to PLUS. */
2849 if (CONST_SCALAR_INT_P (op1)
2850 && mode_signbit_p (mode, op1))
2851 return simplify_gen_binary (PLUS, mode, op0, op1);
2852 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2853 if (CONST_SCALAR_INT_P (op1)
2854 && GET_CODE (op0) == PLUS
2855 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2856 && mode_signbit_p (mode, XEXP (op0, 1)))
2857 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2858 simplify_gen_binary (XOR, mode, op1,
2859 XEXP (op0, 1)));
2861 /* If we are XORing two things that have no bits in common,
2862 convert them into an IOR. This helps to detect rotation encoded
2863 using those methods and possibly other simplifications. */
2865 if (HWI_COMPUTABLE_MODE_P (mode)
2866 && (nonzero_bits (op0, mode)
2867 & nonzero_bits (op1, mode)) == 0)
2868 return (simplify_gen_binary (IOR, mode, op0, op1));
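/* Editor's note (not in the original source): e.g. an XOR of
   (ashift x 24) and (lshiftrt x 8) in SImode has disjoint nonzero
   bits, so rewriting it as IOR lets the rotate detection in the
   IOR case above fire.  */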
2870 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2871 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2872 (NOT y). */
2874 int num_negated = 0;
2876 if (GET_CODE (op0) == NOT)
2877 num_negated++, op0 = XEXP (op0, 0);
2878 if (GET_CODE (op1) == NOT)
2879 num_negated++, op1 = XEXP (op1, 0);
2881 if (num_negated == 2)
2882 return simplify_gen_binary (XOR, mode, op0, op1);
2883 else if (num_negated == 1)
2884 return simplify_gen_unary (NOT, mode,
2885 simplify_gen_binary (XOR, mode, op0, op1),
2886 mode);
2889 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2890 correspond to a machine insn or result in further simplifications
2891 if B is a constant. */
2893 if (GET_CODE (op0) == AND
2894 && rtx_equal_p (XEXP (op0, 1), op1)
2895 && ! side_effects_p (op1))
2896 return simplify_gen_binary (AND, mode,
2897 simplify_gen_unary (NOT, mode,
2898 XEXP (op0, 0), mode),
2899 op1);
2901 else if (GET_CODE (op0) == AND
2902 && rtx_equal_p (XEXP (op0, 0), op1)
2903 && ! side_effects_p (op1))
2904 return simplify_gen_binary (AND, mode,
2905 simplify_gen_unary (NOT, mode,
2906 XEXP (op0, 1), mode),
2907 op1);
2909 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2910 we can transform like this:
2911 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2912 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2913 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2914 Attempt a few simplifications when B and C are both constants. */
2915 if (GET_CODE (op0) == AND
2916 && CONST_INT_P (op1)
2917 && CONST_INT_P (XEXP (op0, 1)))
2919 rtx a = XEXP (op0, 0);
2920 rtx b = XEXP (op0, 1);
2921 rtx c = op1;
2922 HOST_WIDE_INT bval = INTVAL (b);
2923 HOST_WIDE_INT cval = INTVAL (c);
2925 rtx na_c
2926 = simplify_binary_operation (AND, mode,
2927 simplify_gen_unary (NOT, mode, a, mode),
2928 c);
2929 if ((~cval & bval) == 0)
2931 /* Try to simplify ~A&C | ~B&C. */
2932 if (na_c != NULL_RTX)
2933 return simplify_gen_binary (IOR, mode, na_c,
2934 GEN_INT (~bval & cval));
2936 else
2938 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2939 if (na_c == const0_rtx)
2941 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2942 GEN_INT (~cval & bval));
2943 return simplify_gen_binary (IOR, mode, a_nc_b,
2944 GEN_INT (~bval & cval));
2949 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2950 comparison if STORE_FLAG_VALUE is 1. */
2951 if (STORE_FLAG_VALUE == 1
2952 && trueop1 == const1_rtx
2953 && COMPARISON_P (op0)
2954 && (reversed = reversed_comparison (op0, mode)))
2955 return reversed;
2957 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2958 is (lt foo (const_int 0)), so we can perform the above
2959 simplification if STORE_FLAG_VALUE is 1. */
2961 if (STORE_FLAG_VALUE == 1
2962 && trueop1 == const1_rtx
2963 && GET_CODE (op0) == LSHIFTRT
2964 && CONST_INT_P (XEXP (op0, 1))
2965 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2966 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2968 /* (xor (comparison foo bar) (const_int sign-bit))
2969 when STORE_FLAG_VALUE is the sign bit. */
2970 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2971 && trueop1 == const_true_rtx
2972 && COMPARISON_P (op0)
2973 && (reversed = reversed_comparison (op0, mode)))
2974 return reversed;
2976 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2977 if (tem)
2978 return tem;
2980 tem = simplify_associative_operation (code, mode, op0, op1);
2981 if (tem)
2982 return tem;
2983 break;
2985 case AND:
2986 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2987 return trueop1;
2988 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2989 return op0;
2990 if (HWI_COMPUTABLE_MODE_P (mode))
2992 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2993 HOST_WIDE_INT nzop1;
2994 if (CONST_INT_P (trueop1))
2996 HOST_WIDE_INT val1 = INTVAL (trueop1);
2997 /* If we are turning off bits already known off in OP0, we need
2998 not do an AND. */
2999 if ((nzop0 & ~val1) == 0)
3000 return op0;
3002 nzop1 = nonzero_bits (trueop1, mode);
3003 /* If we are clearing all the nonzero bits, the result is zero. */
3004 if ((nzop1 & nzop0) == 0
3005 && !side_effects_p (op0) && !side_effects_p (op1))
3006 return CONST0_RTX (mode);
3008 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3009 && GET_MODE_CLASS (mode) != MODE_CC)
3010 return op0;
3011 /* A & (~A) -> 0 */
3012 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3013 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3014 && ! side_effects_p (op0)
3015 && GET_MODE_CLASS (mode) != MODE_CC)
3016 return CONST0_RTX (mode);
3018 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3019 there are no nonzero bits of C outside of X's mode. */
3020 if ((GET_CODE (op0) == SIGN_EXTEND
3021 || GET_CODE (op0) == ZERO_EXTEND)
3022 && CONST_INT_P (trueop1)
3023 && HWI_COMPUTABLE_MODE_P (mode)
3024 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3025 & UINTVAL (trueop1)) == 0)
3027 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3028 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3029 gen_int_mode (INTVAL (trueop1),
3030 imode));
3031 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3034 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3035 we might be able to further simplify the AND with X and potentially
3036 remove the truncation altogether. */
3037 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3039 rtx x = XEXP (op0, 0);
3040 enum machine_mode xmode = GET_MODE (x);
3041 tem = simplify_gen_binary (AND, xmode, x,
3042 gen_int_mode (INTVAL (trueop1), xmode));
3043 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3046 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3047 if (GET_CODE (op0) == IOR
3048 && CONST_INT_P (trueop1)
3049 && CONST_INT_P (XEXP (op0, 1)))
3051 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3052 return simplify_gen_binary (IOR, mode,
3053 simplify_gen_binary (AND, mode,
3054 XEXP (op0, 0), op1),
3055 gen_int_mode (tmp, mode));
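/* Editor's illustrative example (not from the original source):
   (and (ior x (const_int 0x0f)) (const_int 0x3c)) becomes
   (ior (and x (const_int 0x3c)) (const_int 0x0c)), since
   0x0f & 0x3c == 0x0c.  */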
3058 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3059 insn (and may simplify more). */
3060 if (GET_CODE (op0) == XOR
3061 && rtx_equal_p (XEXP (op0, 0), op1)
3062 && ! side_effects_p (op1))
3063 return simplify_gen_binary (AND, mode,
3064 simplify_gen_unary (NOT, mode,
3065 XEXP (op0, 1), mode),
3066 op1);
3068 if (GET_CODE (op0) == XOR
3069 && rtx_equal_p (XEXP (op0, 1), op1)
3070 && ! side_effects_p (op1))
3071 return simplify_gen_binary (AND, mode,
3072 simplify_gen_unary (NOT, mode,
3073 XEXP (op0, 0), mode),
3074 op1);
3076 /* Similarly for (~(A ^ B)) & A. */
3077 if (GET_CODE (op0) == NOT
3078 && GET_CODE (XEXP (op0, 0)) == XOR
3079 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3080 && ! side_effects_p (op1))
3081 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3083 if (GET_CODE (op0) == NOT
3084 && GET_CODE (XEXP (op0, 0)) == XOR
3085 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3086 && ! side_effects_p (op1))
3087 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3089 /* Convert (A | B) & A to A. */
3090 if (GET_CODE (op0) == IOR
3091 && (rtx_equal_p (XEXP (op0, 0), op1)
3092 || rtx_equal_p (XEXP (op0, 1), op1))
3093 && ! side_effects_p (XEXP (op0, 0))
3094 && ! side_effects_p (XEXP (op0, 1)))
3095 return op1;
3097 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3098 ((A & N) + B) & M -> (A + B) & M
3099 Similarly if (N & M) == 0,
3100 ((A | N) + B) & M -> (A + B) & M
3101 and for - instead of + and/or ^ instead of |.
3102 Also, if (N & M) == 0, then
3103 (A +- N) & M -> A & M. */
3104 if (CONST_INT_P (trueop1)
3105 && HWI_COMPUTABLE_MODE_P (mode)
3106 && ~UINTVAL (trueop1)
3107 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3108 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3110 rtx pmop[2];
3111 int which;
3113 pmop[0] = XEXP (op0, 0);
3114 pmop[1] = XEXP (op0, 1);
3116 if (CONST_INT_P (pmop[1])
3117 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3118 return simplify_gen_binary (AND, mode, pmop[0], op1);
3120 for (which = 0; which < 2; which++)
3122 tem = pmop[which];
3123 switch (GET_CODE (tem))
3125 case AND:
3126 if (CONST_INT_P (XEXP (tem, 1))
3127 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3128 == UINTVAL (trueop1))
3129 pmop[which] = XEXP (tem, 0);
3130 break;
3131 case IOR:
3132 case XOR:
3133 if (CONST_INT_P (XEXP (tem, 1))
3134 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3135 pmop[which] = XEXP (tem, 0);
3136 break;
3137 default:
3138 break;
3142 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3144 tem = simplify_gen_binary (GET_CODE (op0), mode,
3145 pmop[0], pmop[1]);
3146 return simplify_gen_binary (code, mode, tem, op1);
3150 /* (and X (ior (not X) Y)) -> (and X Y) */
3151 if (GET_CODE (op1) == IOR
3152 && GET_CODE (XEXP (op1, 0)) == NOT
3153 && op0 == XEXP (XEXP (op1, 0), 0))
3154 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3156 /* (and (ior (not X) Y) X) -> (and X Y) */
3157 if (GET_CODE (op0) == IOR
3158 && GET_CODE (XEXP (op0, 0)) == NOT
3159 && op1 == XEXP (XEXP (op0, 0), 0))
3160 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3162 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3163 if (tem)
3164 return tem;
3166 tem = simplify_associative_operation (code, mode, op0, op1);
3167 if (tem)
3168 return tem;
3169 break;
3171 case UDIV:
3172 /* 0/x is 0 (or x&0 if x has side-effects). */
3173 if (trueop0 == CONST0_RTX (mode))
3175 if (side_effects_p (op1))
3176 return simplify_gen_binary (AND, mode, op1, trueop0);
3177 return trueop0;
3179 /* x/1 is x. */
3180 if (trueop1 == CONST1_RTX (mode))
3182 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3183 if (tem)
3184 return tem;
3186 /* Convert divide by power of two into shift. */
3187 if (CONST_INT_P (trueop1)
3188 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3189 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3190 break;
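/* Editor's illustrative example (not from the original source):
   (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */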
3192 case DIV:
3193 /* Handle floating point and integers separately. */
3194 if (SCALAR_FLOAT_MODE_P (mode))
3196 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3197 safe for modes with NaNs, since 0.0 / 0.0 will then be
3198 NaN rather than 0.0. Nor is it safe for modes with signed
3199 zeros, since dividing 0 by a negative number gives -0.0. */
3200 if (trueop0 == CONST0_RTX (mode)
3201 && !HONOR_NANS (mode)
3202 && !HONOR_SIGNED_ZEROS (mode)
3203 && ! side_effects_p (op1))
3204 return op0;
3205 /* x/1.0 is x. */
3206 if (trueop1 == CONST1_RTX (mode)
3207 && !HONOR_SNANS (mode))
3208 return op0;
3210 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3211 && trueop1 != CONST0_RTX (mode))
3213 REAL_VALUE_TYPE d;
3214 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3216 /* x/-1.0 is -x. */
3217 if (REAL_VALUES_EQUAL (d, dconstm1)
3218 && !HONOR_SNANS (mode))
3219 return simplify_gen_unary (NEG, mode, op0, mode);
3221 /* Change FP division by a constant into multiplication.
3222 Only do this with -freciprocal-math. */
3223 if (flag_reciprocal_math
3224 && !REAL_VALUES_EQUAL (d, dconst0))
3226 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3227 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3228 return simplify_gen_binary (MULT, mode, op0, tem);
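/* Editor's illustrative example (not from the original source):
   with -freciprocal-math, (div x 4.0) becomes (mult x 0.25).  */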
3232 else if (SCALAR_INT_MODE_P (mode))
3234 /* 0/x is 0 (or x&0 if x has side-effects). */
3235 if (trueop0 == CONST0_RTX (mode)
3236 && !cfun->can_throw_non_call_exceptions)
3238 if (side_effects_p (op1))
3239 return simplify_gen_binary (AND, mode, op1, trueop0);
3240 return trueop0;
3242 /* x/1 is x. */
3243 if (trueop1 == CONST1_RTX (mode))
3245 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3246 if (tem)
3247 return tem;
3249 /* x/-1 is -x. */
3250 if (trueop1 == constm1_rtx)
3252 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3253 if (x)
3254 return simplify_gen_unary (NEG, mode, x, mode);
3257 break;
3259 case UMOD:
3260 /* 0%x is 0 (or x&0 if x has side-effects). */
3261 if (trueop0 == CONST0_RTX (mode))
3263 if (side_effects_p (op1))
3264 return simplify_gen_binary (AND, mode, op1, trueop0);
3265 return trueop0;
3267 /* x%1 is 0 (or x&0 if x has side-effects). */
3268 if (trueop1 == CONST1_RTX (mode))
3270 if (side_effects_p (op0))
3271 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3272 return CONST0_RTX (mode);
3274 /* Implement modulus by power of two as AND. */
3275 if (CONST_INT_P (trueop1)
3276 && exact_log2 (UINTVAL (trueop1)) > 0)
3277 return simplify_gen_binary (AND, mode, op0,
3278 GEN_INT (INTVAL (op1) - 1));
3279 break;
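/* Editor's illustrative example (not from the original source):
   (umod x (const_int 16)) becomes (and x (const_int 15)).  */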
3281 case MOD:
3282 /* 0%x is 0 (or x&0 if x has side-effects). */
3283 if (trueop0 == CONST0_RTX (mode))
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3289 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3290 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3292 if (side_effects_p (op0))
3293 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3294 return CONST0_RTX (mode);
3296 break;
3298 case ROTATERT:
3299 case ROTATE:
3300 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3301 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3302 bitsize - 1, use the other rotate direction with an amount of
3303 1 .. bitsize / 2 - 1 instead. */
3304 if (CONST_INT_P (trueop1)
3305 && IN_RANGE (INTVAL (trueop1),
3306 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3307 GET_MODE_BITSIZE (mode) - 1))
3308 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3309 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3310 - INTVAL (trueop1)));
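/* Editor's illustrative example (not from the original source): in
   SImode, (rotate x 30) is canonicalized to (rotatert x 2), and
   (rotatert x 16) to (rotate x 16); left rotation is preferred at
   exactly half the bitsize.  */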
3311 /* FALLTHRU */
3312 case ASHIFTRT:
3313 if (trueop1 == CONST0_RTX (mode))
3314 return op0;
3315 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3316 return op0;
3317 /* Rotating ~0 always results in ~0. */
3318 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3319 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3320 && ! side_effects_p (op1))
3321 return op0;
3322 canonicalize_shift:
3323 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3325 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3326 if (val != INTVAL (op1))
3327 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3329 break;
3331 case ASHIFT:
3332 case SS_ASHIFT:
3333 case US_ASHIFT:
3334 if (trueop1 == CONST0_RTX (mode))
3335 return op0;
3336 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3337 return op0;
3338 goto canonicalize_shift;
3340 case LSHIFTRT:
3341 if (trueop1 == CONST0_RTX (mode))
3342 return op0;
3343 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3344 return op0;
3345 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3346 if (GET_CODE (op0) == CLZ
3347 && CONST_INT_P (trueop1)
3348 && STORE_FLAG_VALUE == 1
3349 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3351 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3352 unsigned HOST_WIDE_INT zero_val = 0;
3354 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3355 && zero_val == GET_MODE_PRECISION (imode)
3356 && INTVAL (trueop1) == exact_log2 (zero_val))
3357 return simplify_gen_relational (EQ, mode, imode,
3358 XEXP (op0, 0), const0_rtx);
3360 goto canonicalize_shift;
3362 case SMIN:
3363 if (width <= HOST_BITS_PER_WIDE_INT
3364 && mode_signbit_p (mode, trueop1)
3365 && ! side_effects_p (op0))
3366 return op1;
3367 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3368 return op0;
3369 tem = simplify_associative_operation (code, mode, op0, op1);
3370 if (tem)
3371 return tem;
3372 break;
3374 case SMAX:
3375 if (width <= HOST_BITS_PER_WIDE_INT
3376 && CONST_INT_P (trueop1)
3377 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3378 && ! side_effects_p (op0))
3379 return op1;
3380 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3381 return op0;
3382 tem = simplify_associative_operation (code, mode, op0, op1);
3383 if (tem)
3384 return tem;
3385 break;
3387 case UMIN:
3388 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3389 return op1;
3390 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3391 return op0;
3392 tem = simplify_associative_operation (code, mode, op0, op1);
3393 if (tem)
3394 return tem;
3395 break;
3397 case UMAX:
3398 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3399 return op1;
3400 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3401 return op0;
3402 tem = simplify_associative_operation (code, mode, op0, op1);
3403 if (tem)
3404 return tem;
3405 break;
3407 case SS_PLUS:
3408 case US_PLUS:
3409 case SS_MINUS:
3410 case US_MINUS:
3411 case SS_MULT:
3412 case US_MULT:
3413 case SS_DIV:
3414 case US_DIV:
3415 /* ??? There are simplifications that can be done. */
3416 return 0;
3418 case VEC_SELECT:
3419 if (!VECTOR_MODE_P (mode))
3421 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3422 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3423 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3424 gcc_assert (XVECLEN (trueop1, 0) == 1);
3425 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3427 if (GET_CODE (trueop0) == CONST_VECTOR)
3428 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3429 (trueop1, 0, 0)));
3431 /* Extract a scalar element from a nested VEC_SELECT expression
3432 (with optional nested VEC_CONCAT expression). Some targets
3433 (i386) extract a scalar element from a vector using a chain of
3434 nested VEC_SELECT expressions. When the input operand is a memory
3435 operand, this operation can be simplified to a simple scalar
3436 load from an offset memory address. */
3437 if (GET_CODE (trueop0) == VEC_SELECT)
3439 rtx op0 = XEXP (trueop0, 0);
3440 rtx op1 = XEXP (trueop0, 1);
3442 enum machine_mode opmode = GET_MODE (op0);
3443 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3444 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3446 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3447 int elem;
3449 rtvec vec;
3450 rtx tmp_op, tmp;
3452 gcc_assert (GET_CODE (op1) == PARALLEL);
3453 gcc_assert (i < n_elts);
3455 /* Select the element pointed to by the nested selector. */
3456 elem = INTVAL (XVECEXP (op1, 0, i));
3458 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3459 if (GET_CODE (op0) == VEC_CONCAT)
3461 rtx op00 = XEXP (op0, 0);
3462 rtx op01 = XEXP (op0, 1);
3464 enum machine_mode mode00, mode01;
3465 int n_elts00, n_elts01;
3467 mode00 = GET_MODE (op00);
3468 mode01 = GET_MODE (op01);
3470 /* Find out number of elements of each operand. */
3471 if (VECTOR_MODE_P (mode00))
3473 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3474 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3476 else
3477 n_elts00 = 1;
3479 if (VECTOR_MODE_P (mode01))
3481 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3482 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3484 else
3485 n_elts01 = 1;
3487 gcc_assert (n_elts == n_elts00 + n_elts01);
3489 /* Select correct operand of VEC_CONCAT
3490 and adjust selector. */
3491 if (elem < n_elts00)
3492 tmp_op = op00;
3493 else
3495 tmp_op = op01;
3496 elem -= n_elts00;
3499 else
3500 tmp_op = op0;
3502 vec = rtvec_alloc (1);
3503 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3505 tmp = gen_rtx_fmt_ee (code, mode,
3506 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3507 return tmp;
3509 if (GET_CODE (trueop0) == VEC_DUPLICATE
3510 && GET_MODE (XEXP (trueop0, 0)) == mode)
3511 return XEXP (trueop0, 0);
3513 else
3515 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3516 gcc_assert (GET_MODE_INNER (mode)
3517 == GET_MODE_INNER (GET_MODE (trueop0)));
3518 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3520 if (GET_CODE (trueop0) == CONST_VECTOR)
3522 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3523 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3524 rtvec v = rtvec_alloc (n_elts);
3525 unsigned int i;
3527 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3528 for (i = 0; i < n_elts; i++)
3530 rtx x = XVECEXP (trueop1, 0, i);
3532 gcc_assert (CONST_INT_P (x));
3533 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3534 INTVAL (x));
3537 return gen_rtx_CONST_VECTOR (mode, v);
3540 /* Recognize the identity. */
3541 if (GET_MODE (trueop0) == mode)
3543 bool maybe_ident = true;
3544 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3546 rtx j = XVECEXP (trueop1, 0, i);
3547 if (!CONST_INT_P (j) || INTVAL (j) != i)
3549 maybe_ident = false;
3550 break;
3553 if (maybe_ident)
3554 return trueop0;
3557 /* If we build {a,b} then permute it, build the result directly. */
3558 if (XVECLEN (trueop1, 0) == 2
3559 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3560 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3561 && GET_CODE (trueop0) == VEC_CONCAT
3562 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3563 && GET_MODE (XEXP (trueop0, 0)) == mode
3564 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3565 && GET_MODE (XEXP (trueop0, 1)) == mode)
3567 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3568 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3569 rtx subop0, subop1;
3571 gcc_assert (i0 < 4 && i1 < 4);
3572 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3573 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3575 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3578 if (XVECLEN (trueop1, 0) == 2
3579 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3580 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3581 && GET_CODE (trueop0) == VEC_CONCAT
3582 && GET_MODE (trueop0) == mode)
3584 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3585 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3586 rtx subop0, subop1;
3588 gcc_assert (i0 < 2 && i1 < 2);
3589 subop0 = XEXP (trueop0, i0);
3590 subop1 = XEXP (trueop0, i1);
3592 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3596 if (XVECLEN (trueop1, 0) == 1
3597 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3598 && GET_CODE (trueop0) == VEC_CONCAT)
3600 rtx vec = trueop0;
3601 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3603 /* Try to find the element in the VEC_CONCAT. */
3604 while (GET_MODE (vec) != mode
3605 && GET_CODE (vec) == VEC_CONCAT)
3607 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3608 if (offset < vec_size)
3609 vec = XEXP (vec, 0);
3610 else
3612 offset -= vec_size;
3613 vec = XEXP (vec, 1);
3615 vec = avoid_constant_pool_reference (vec);
3618 if (GET_MODE (vec) == mode)
3619 return vec;
3622 /* If we select elements in a vec_merge that all come from the same
3623 operand, select from that operand directly. */
3624 if (GET_CODE (op0) == VEC_MERGE)
3626 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3627 if (CONST_INT_P (trueop02))
3629 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3630 bool all_operand0 = true;
3631 bool all_operand1 = true;
3632 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3634 rtx j = XVECEXP (trueop1, 0, i);
3635 if (sel & ((unsigned HOST_WIDE_INT) 1 << UINTVAL (j)))
3636 all_operand1 = false;
3637 else
3638 all_operand0 = false;
3640 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3641 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3642 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3643 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3647 return 0;
3648 case VEC_CONCAT:
3650 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3651 ? GET_MODE (trueop0)
3652 : GET_MODE_INNER (mode));
3653 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3654 ? GET_MODE (trueop1)
3655 : GET_MODE_INNER (mode));
3657 gcc_assert (VECTOR_MODE_P (mode));
3658 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3659 == GET_MODE_SIZE (mode));
3661 if (VECTOR_MODE_P (op0_mode))
3662 gcc_assert (GET_MODE_INNER (mode)
3663 == GET_MODE_INNER (op0_mode));
3664 else
3665 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3667 if (VECTOR_MODE_P (op1_mode))
3668 gcc_assert (GET_MODE_INNER (mode)
3669 == GET_MODE_INNER (op1_mode));
3670 else
3671 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3673 if ((GET_CODE (trueop0) == CONST_VECTOR
3674 || CONST_SCALAR_INT_P (trueop0)
3675 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3676 && (GET_CODE (trueop1) == CONST_VECTOR
3677 || CONST_SCALAR_INT_P (trueop1)
3678 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3680 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3681 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3682 rtvec v = rtvec_alloc (n_elts);
3683 unsigned int i;
3684 unsigned in_n_elts = 1;
3686 if (VECTOR_MODE_P (op0_mode))
3687 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3688 for (i = 0; i < n_elts; i++)
3690 if (i < in_n_elts)
3692 if (!VECTOR_MODE_P (op0_mode))
3693 RTVEC_ELT (v, i) = trueop0;
3694 else
3695 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3697 else
3699 if (!VECTOR_MODE_P (op1_mode))
3700 RTVEC_ELT (v, i) = trueop1;
3701 else
3702 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3703 i - in_n_elts);
3707 return gen_rtx_CONST_VECTOR (mode, v);
3710 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3711 Restrict the transformation to avoid generating a VEC_SELECT with a
3712 mode unrelated to its operand. */
3713 if (GET_CODE (trueop0) == VEC_SELECT
3714 && GET_CODE (trueop1) == VEC_SELECT
3715 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3716 && GET_MODE (XEXP (trueop0, 0)) == mode)
3718 rtx par0 = XEXP (trueop0, 1);
3719 rtx par1 = XEXP (trueop1, 1);
3720 int len0 = XVECLEN (par0, 0);
3721 int len1 = XVECLEN (par1, 0);
3722 rtvec vec = rtvec_alloc (len0 + len1);
3723 for (int i = 0; i < len0; i++)
3724 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3725 for (int i = 0; i < len1; i++)
3726 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3727 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3728 gen_rtx_PARALLEL (VOIDmode, vec));
3731 return 0;
3733 default:
3734 gcc_unreachable ();
3737 return 0;
3740 rtx
3741 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3742 rtx op0, rtx op1)
3744 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3745 HOST_WIDE_INT val;
3746 unsigned int width = GET_MODE_PRECISION (mode);
3748 if (VECTOR_MODE_P (mode)
3749 && code != VEC_CONCAT
3750 && GET_CODE (op0) == CONST_VECTOR
3751 && GET_CODE (op1) == CONST_VECTOR)
3753 unsigned n_elts = GET_MODE_NUNITS (mode);
3754 enum machine_mode op0mode = GET_MODE (op0);
3755 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3756 enum machine_mode op1mode = GET_MODE (op1);
3757 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3758 rtvec v = rtvec_alloc (n_elts);
3759 unsigned int i;
3761 gcc_assert (op0_n_elts == n_elts);
3762 gcc_assert (op1_n_elts == n_elts);
3763 for (i = 0; i < n_elts; i++)
3765 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3766 CONST_VECTOR_ELT (op0, i),
3767 CONST_VECTOR_ELT (op1, i));
3768 if (!x)
3769 return 0;
3770 RTVEC_ELT (v, i) = x;
3773 return gen_rtx_CONST_VECTOR (mode, v);
3776 if (VECTOR_MODE_P (mode)
3777 && code == VEC_CONCAT
3778 && (CONST_SCALAR_INT_P (op0)
3779 || GET_CODE (op0) == CONST_FIXED
3780 || CONST_DOUBLE_AS_FLOAT_P (op0))
3781 && (CONST_SCALAR_INT_P (op1)
3782 || CONST_DOUBLE_AS_FLOAT_P (op1)
3783 || GET_CODE (op1) == CONST_FIXED))
3785 unsigned n_elts = GET_MODE_NUNITS (mode);
3786 rtvec v = rtvec_alloc (n_elts);
3788 gcc_assert (n_elts >= 2);
3789 if (n_elts == 2)
3791 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3792 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3794 RTVEC_ELT (v, 0) = op0;
3795 RTVEC_ELT (v, 1) = op1;
3797 else
3799 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3800 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3801 unsigned i;
3803 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3804 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3805 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3807 for (i = 0; i < op0_n_elts; ++i)
3808 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3809 for (i = 0; i < op1_n_elts; ++i)
3810 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3813 return gen_rtx_CONST_VECTOR (mode, v);
3816 if (SCALAR_FLOAT_MODE_P (mode)
3817 && CONST_DOUBLE_AS_FLOAT_P (op0)
3818 && CONST_DOUBLE_AS_FLOAT_P (op1)
3819 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3821 if (code == AND
3822 || code == IOR
3823 || code == XOR)
3825 long tmp0[4];
3826 long tmp1[4];
3827 REAL_VALUE_TYPE r;
3828 int i;
3830 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3831 GET_MODE (op0));
3832 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3833 GET_MODE (op1));
3834 for (i = 0; i < 4; i++)
3836 switch (code)
3838 case AND:
3839 tmp0[i] &= tmp1[i];
3840 break;
3841 case IOR:
3842 tmp0[i] |= tmp1[i];
3843 break;
3844 case XOR:
3845 tmp0[i] ^= tmp1[i];
3846 break;
3847 default:
3848 gcc_unreachable ();
3851 real_from_target (&r, tmp0, mode);
3852 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
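/* The bitwise cases above work on the target representation:
   real_to_target splits each value into 32-bit words, so the
   four-element arrays cover floats of up to 128 bits.  This is
   how sign-bit manipulations (abs, neg, copysign expansions) on
   floating-point constants get folded.  */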
3854 else
3856 REAL_VALUE_TYPE f0, f1, value, result;
3857 bool inexact;
3859 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3860 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3861 real_convert (&f0, mode, &f0);
3862 real_convert (&f1, mode, &f1);
3864 if (HONOR_SNANS (mode)
3865 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3866 return 0;
3868 if (code == DIV
3869 && REAL_VALUES_EQUAL (f1, dconst0)
3870 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3871 return 0;
3873 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3874 && flag_trapping_math
3875 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3877 int s0 = REAL_VALUE_NEGATIVE (f0);
3878 int s1 = REAL_VALUE_NEGATIVE (f1);
3880 switch (code)
3882 case PLUS:
3883 /* Inf + -Inf = NaN plus exception. */
3884 if (s0 != s1)
3885 return 0;
3886 break;
3887 case MINUS:
3888 /* Inf - Inf = NaN plus exception. */
3889 if (s0 == s1)
3890 return 0;
3891 break;
3892 case DIV:
3893 /* Inf / Inf = NaN plus exception. */
3894 return 0;
3895 default:
3896 break;
3900 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3901 && flag_trapping_math
3902 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3903 || (REAL_VALUE_ISINF (f1)
3904 && REAL_VALUES_EQUAL (f0, dconst0))))
3905 /* Inf * 0 = NaN plus exception. */
3906 return 0;
3908 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3909 &f0, &f1);
3910 real_convert (&result, mode, &value);
3912 /* Don't constant fold this floating point operation if
3913 the result has overflowed and flag_trapping_math is set. */
3915 if (flag_trapping_math
3916 && MODE_HAS_INFINITIES (mode)
3917 && REAL_VALUE_ISINF (result)
3918 && !REAL_VALUE_ISINF (f0)
3919 && !REAL_VALUE_ISINF (f1))
3920 /* Overflow plus exception. */
3921 return 0;
3923 /* Don't constant fold this floating point operation if the
3924 result may depend upon the run-time rounding mode and
3925 flag_rounding_math is set, or if GCC's software emulation
3926 is unable to accurately represent the result. */
3928 if ((flag_rounding_math
3929 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3930 && (inexact || !real_identical (&result, &value)))
3931 return NULL_RTX;
3933 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3937 /* We can fold some multi-word operations. */
3938 if (GET_MODE_CLASS (mode) == MODE_INT
3939 && width == HOST_BITS_PER_DOUBLE_INT
3940 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3941 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3943 double_int o0, o1, res, tmp;
3944 bool overflow;
3946 o0 = rtx_to_double_int (op0);
3947 o1 = rtx_to_double_int (op1);
3949 switch (code)
3951 case MINUS:
3952 /* A - B == A + (-B). */
3953 o1 = -o1;
3955 /* Fall through.... */
3957 case PLUS:
3958 res = o0 + o1;
3959 break;
3961 case MULT:
3962 res = o0 * o1;
3963 break;
3965 case DIV:
3966 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3967 &tmp, &overflow);
3968 if (overflow)
3969 return 0;
3970 break;
3972 case MOD:
3973 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3974 &res, &overflow);
3975 if (overflow)
3976 return 0;
3977 break;
3979 case UDIV:
3980 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3981 &tmp, &overflow);
3982 if (overflow)
3983 return 0;
3984 break;
3986 case UMOD:
3987 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3988 &res, &overflow);
3989 if (overflow)
3990 return 0;
3991 break;
3993 case AND:
3994 res = o0 & o1;
3995 break;
3997 case IOR:
3998 res = o0 | o1;
3999 break;
4001 case XOR:
4002 res = o0 ^ o1;
4003 break;
4005 case SMIN:
4006 res = o0.smin (o1);
4007 break;
4009 case SMAX:
4010 res = o0.smax (o1);
4011 break;
4013 case UMIN:
4014 res = o0.umin (o1);
4015 break;
4017 case UMAX:
4018 res = o0.umax (o1);
4019 break;
4021 case LSHIFTRT: case ASHIFTRT:
4022 case ASHIFT:
4023 case ROTATE: case ROTATERT:
4025 unsigned HOST_WIDE_INT cnt;
4027 if (SHIFT_COUNT_TRUNCATED)
4029 o1.high = 0;
4030 o1.low &= GET_MODE_PRECISION (mode) - 1;
4033 if (!o1.fits_uhwi ()
4034 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4035 return 0;
4037 cnt = o1.to_uhwi ();
4038 unsigned short prec = GET_MODE_PRECISION (mode);
4040 if (code == LSHIFTRT || code == ASHIFTRT)
4041 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4042 else if (code == ASHIFT)
4043 res = o0.alshift (cnt, prec);
4044 else if (code == ROTATE)
4045 res = o0.lrotate (cnt, prec);
4046 else /* code == ROTATERT */
4047 res = o0.rrotate (cnt, prec);
4049 break;
4051 default:
4052 return 0;
4055 return immed_double_int_const (res, mode);
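/* The double_int path above folds operations whose precision is
   exactly twice HOST_BITS_PER_WIDE_INT -- e.g. TImode arithmetic
   on a host with a 64-bit HOST_WIDE_INT.  Narrower modes fall
   through to the single-word code below.  */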
4058 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4059 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4061 /* Get the integer argument values in two forms:
4062 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4064 arg0 = INTVAL (op0);
4065 arg1 = INTVAL (op1);
4067 if (width < HOST_BITS_PER_WIDE_INT)
4069 arg0 &= GET_MODE_MASK (mode);
4070 arg1 &= GET_MODE_MASK (mode);
4072 arg0s = arg0;
4073 if (val_signbit_known_set_p (mode, arg0s))
4074 arg0s |= ~GET_MODE_MASK (mode);
4076 arg1s = arg1;
4077 if (val_signbit_known_set_p (mode, arg1s))
4078 arg1s |= ~GET_MODE_MASK (mode);
4080 else
4082 arg0s = arg0;
4083 arg1s = arg1;
4086 /* Compute the value of the arithmetic. */
4088 switch (code)
4090 case PLUS:
4091 val = arg0s + arg1s;
4092 break;
4094 case MINUS:
4095 val = arg0s - arg1s;
4096 break;
4098 case MULT:
4099 val = arg0s * arg1s;
4100 break;
4102 case DIV:
4103 if (arg1s == 0
4104 || ((unsigned HOST_WIDE_INT) arg0s
4105 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4106 && arg1s == -1))
4107 return 0;
4108 val = arg0s / arg1s;
4109 break;
4111 case MOD:
4112 if (arg1s == 0
4113 || ((unsigned HOST_WIDE_INT) arg0s
4114 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4115 && arg1s == -1))
4116 return 0;
4117 val = arg0s % arg1s;
4118 break;
4120 case UDIV:
4121 if (arg1 == 0
4122 || ((unsigned HOST_WIDE_INT) arg0s
4123 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4124 && arg1s == -1))
4125 return 0;
4126 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4127 break;
4129 case UMOD:
4130 if (arg1 == 0
4131 || ((unsigned HOST_WIDE_INT) arg0s
4132 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4133 && arg1s == -1))
4134 return 0;
4135 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4136 break;
4138 case AND:
4139 val = arg0 & arg1;
4140 break;
4142 case IOR:
4143 val = arg0 | arg1;
4144 break;
4146 case XOR:
4147 val = arg0 ^ arg1;
4148 break;
4150 case LSHIFTRT:
4151 case ASHIFT:
4152 case ASHIFTRT:
4153 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4154 the value is in range. We can't return any old value for
4155 out-of-range arguments because either the middle-end (via
4156 shift_truncation_mask) or the back-end might be relying on
4157 target-specific knowledge. Nor can we rely on
4158 shift_truncation_mask, since the shift might not be part of an
4159 ashlM3, lshrM3 or ashrM3 instruction. */
4160 if (SHIFT_COUNT_TRUNCATED)
4161 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4162 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4163 return 0;
4165 val = (code == ASHIFT
4166 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4167 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4169 /* Sign-extend the result for arithmetic right shifts. */
4170 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4171 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4172 break;
4174 case ROTATERT:
4175 if (arg1 < 0)
4176 return 0;
4178 arg1 %= width;
4179 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4180 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4181 break;
4183 case ROTATE:
4184 if (arg1 < 0)
4185 return 0;
4187 arg1 %= width;
4188 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4189 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4190 break;
4192 case COMPARE:
4193 /* Do nothing here. */
4194 return 0;
4196 case SMIN:
4197 val = arg0s <= arg1s ? arg0s : arg1s;
4198 break;
4200 case UMIN:
4201 val = ((unsigned HOST_WIDE_INT) arg0
4202 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4203 break;
4205 case SMAX:
4206 val = arg0s > arg1s ? arg0s : arg1s;
4207 break;
4209 case UMAX:
4210 val = ((unsigned HOST_WIDE_INT) arg0
4211 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4212 break;
4214 case SS_PLUS:
4215 case US_PLUS:
4216 case SS_MINUS:
4217 case US_MINUS:
4218 case SS_MULT:
4219 case US_MULT:
4220 case SS_DIV:
4221 case US_DIV:
4222 case SS_ASHIFT:
4223 case US_ASHIFT:
4224 /* ??? There are simplifications that can be done. */
4225 return 0;
4227 default:
4228 gcc_unreachable ();
4231 return gen_int_mode (val, mode);
4234 return NULL_RTX;
4239 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4240 PLUS or MINUS.
4242 Rather than test for specific cases, we do this by a brute-force method
4243 and do all possible simplifications until no more changes occur. Then
4244 we rebuild the operation. */
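/* For instance, (plus (minus x y) (neg z)) is expanded into the
   operand list { x, -y, -z } and rebuilt, up to operand ordering,
   as (minus (minus x y) z).  */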
4246 struct simplify_plus_minus_op_data
4248 rtx op;
4249 short neg;
4252 static bool
4253 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4255 int result;
4257 result = (commutative_operand_precedence (y)
4258 - commutative_operand_precedence (x));
4259 if (result)
4260 return result > 0;
4262 /* Group together equal REGs to do more simplification. */
4263 if (REG_P (x) && REG_P (y))
4264 return REGNO (x) > REGNO (y);
4265 else
4266 return false;
4269 static rtx
4270 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4271 rtx op1)
4273 struct simplify_plus_minus_op_data ops[8];
4274 rtx result, tem;
4275 int n_ops = 2, input_ops = 2;
4276 int changed, n_constants = 0, canonicalized = 0;
4277 int i, j;
4279 memset (ops, 0, sizeof ops);
4281 /* Set up the two operands and then expand them until nothing has been
4282 changed. If we run out of room in our array, give up; this should
4283 almost never happen. */
4285 ops[0].op = op0;
4286 ops[0].neg = 0;
4287 ops[1].op = op1;
4288 ops[1].neg = (code == MINUS);
4292 changed = 0;
4294 for (i = 0; i < n_ops; i++)
4296 rtx this_op = ops[i].op;
4297 int this_neg = ops[i].neg;
4298 enum rtx_code this_code = GET_CODE (this_op);
4300 switch (this_code)
4302 case PLUS:
4303 case MINUS:
4304 if (n_ops == 7)
4305 return NULL_RTX;
4307 ops[n_ops].op = XEXP (this_op, 1);
4308 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4309 n_ops++;
4311 ops[i].op = XEXP (this_op, 0);
4312 input_ops++;
4313 changed = 1;
4314 canonicalized |= this_neg;
4315 break;
4317 case NEG:
4318 ops[i].op = XEXP (this_op, 0);
4319 ops[i].neg = ! this_neg;
4320 changed = 1;
4321 canonicalized = 1;
4322 break;
4324 case CONST:
4325 if (n_ops < 7
4326 && GET_CODE (XEXP (this_op, 0)) == PLUS
4327 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4328 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4330 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4331 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4332 ops[n_ops].neg = this_neg;
4333 n_ops++;
4334 changed = 1;
4335 canonicalized = 1;
4337 break;
4339 case NOT:
4340 /* ~a -> (-a - 1) */
4341 if (n_ops != 7)
4343 ops[n_ops].op = CONSTM1_RTX (mode);
4344 ops[n_ops++].neg = this_neg;
4345 ops[i].op = XEXP (this_op, 0);
4346 ops[i].neg = !this_neg;
4347 changed = 1;
4348 canonicalized = 1;
4350 break;
4352 case CONST_INT:
4353 n_constants++;
4354 if (this_neg)
4356 ops[i].op = neg_const_int (mode, this_op);
4357 ops[i].neg = 0;
4358 changed = 1;
4359 canonicalized = 1;
4361 break;
4363 default:
4364 break;
4368 while (changed);
4370 if (n_constants > 1)
4371 canonicalized = 1;
4373 gcc_assert (n_ops >= 2);
4375 /* If we only have two operands, we can avoid the loops. */
4376 if (n_ops == 2)
4378 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4379 rtx lhs, rhs;
4381 /* Get the two operands. Be careful with the order, especially for
4382 the cases where code == MINUS. */
4383 if (ops[0].neg && ops[1].neg)
4385 lhs = gen_rtx_NEG (mode, ops[0].op);
4386 rhs = ops[1].op;
4388 else if (ops[0].neg)
4390 lhs = ops[1].op;
4391 rhs = ops[0].op;
4393 else
4395 lhs = ops[0].op;
4396 rhs = ops[1].op;
4399 return simplify_const_binary_operation (code, mode, lhs, rhs);
4402 /* Now simplify each pair of operands until nothing changes. */
4405 /* Insertion sort is good enough for an eight-element array. */
4406 for (i = 1; i < n_ops; i++)
4408 struct simplify_plus_minus_op_data save;
4409 j = i - 1;
4410 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4411 continue;
4413 canonicalized = 1;
4414 save = ops[i];
4416 ops[j + 1] = ops[j];
4417 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4418 ops[j + 1] = save;
4421 changed = 0;
4422 for (i = n_ops - 1; i > 0; i--)
4423 for (j = i - 1; j >= 0; j--)
4425 rtx lhs = ops[j].op, rhs = ops[i].op;
4426 int lneg = ops[j].neg, rneg = ops[i].neg;
4428 if (lhs != 0 && rhs != 0)
4430 enum rtx_code ncode = PLUS;
4432 if (lneg != rneg)
4434 ncode = MINUS;
4435 if (lneg)
4436 tem = lhs, lhs = rhs, rhs = tem;
4438 else if (swap_commutative_operands_p (lhs, rhs))
4439 tem = lhs, lhs = rhs, rhs = tem;
4441 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4442 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4444 rtx tem_lhs, tem_rhs;
4446 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4447 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4448 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4450 if (tem && !CONSTANT_P (tem))
4451 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4453 else
4454 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4456 /* Reject "simplifications" that just wrap the two
4457 arguments in a CONST. Failure to do so can result
4458 in infinite recursion with simplify_binary_operation
4459 when it calls us to simplify CONST operations. */
4460 if (tem
4461 && ! (GET_CODE (tem) == CONST
4462 && GET_CODE (XEXP (tem, 0)) == ncode
4463 && XEXP (XEXP (tem, 0), 0) == lhs
4464 && XEXP (XEXP (tem, 0), 1) == rhs))
4466 lneg &= rneg;
4467 if (GET_CODE (tem) == NEG)
4468 tem = XEXP (tem, 0), lneg = !lneg;
4469 if (CONST_INT_P (tem) && lneg)
4470 tem = neg_const_int (mode, tem), lneg = 0;
4472 ops[i].op = tem;
4473 ops[i].neg = lneg;
4474 ops[j].op = NULL_RTX;
4475 changed = 1;
4476 canonicalized = 1;
4481 /* If nothing changed, fail. */
4482 if (!canonicalized)
4483 return NULL_RTX;
4485 /* Pack all the operands to the lower-numbered entries. */
4486 for (i = 0, j = 0; j < n_ops; j++)
4487 if (ops[j].op)
4489 ops[i] = ops[j];
4490 i++;
4492 n_ops = i;
4494 while (changed);
4496 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4497 if (n_ops == 2
4498 && CONST_INT_P (ops[1].op)
4499 && CONSTANT_P (ops[0].op)
4500 && ops[0].neg)
4501 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4503 /* We suppressed creation of trivial CONST expressions in the
4504 combination loop to avoid recursion. Create one manually now.
4505 The combination loop should have ensured that there is exactly
4506 one CONST_INT, and the sort will have ensured that it is last
4507 in the array and that any other constant will be next-to-last. */
4509 if (n_ops > 1
4510 && CONST_INT_P (ops[n_ops - 1].op)
4511 && CONSTANT_P (ops[n_ops - 2].op))
4513 rtx value = ops[n_ops - 1].op;
4514 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4515 value = neg_const_int (mode, value);
4516 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4517 INTVAL (value));
4518 n_ops--;
4521 /* Put a non-negated operand first, if possible. */
4523 for (i = 0; i < n_ops && ops[i].neg; i++)
4524 continue;
4525 if (i == n_ops)
4526 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4527 else if (i != 0)
4529 tem = ops[0].op;
4530 ops[0] = ops[i];
4531 ops[i].op = tem;
4532 ops[i].neg = 1;
4535 /* Now make the result by performing the requested operations. */
4536 result = ops[0].op;
4537 for (i = 1; i < n_ops; i++)
4538 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4539 mode, result, ops[i].op);
4541 return result;
4544 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4545 static bool
4546 plus_minus_operand_p (const_rtx x)
4548 return GET_CODE (x) == PLUS
4549 || GET_CODE (x) == MINUS
4550 || (GET_CODE (x) == CONST
4551 && GET_CODE (XEXP (x, 0)) == PLUS
4552 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4553 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4556 /* Like simplify_binary_operation except used for relational operators.
4557 MODE is the mode of the result. If MODE is VOIDmode, the operands
4558 must not both be VOIDmode.
4560 CMP_MODE specifies the mode in which the comparison is done, so it is
4561 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4562 the operands or, if both are VOIDmode, the operands are compared in
4563 "infinite precision". */
4565 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4566 enum machine_mode cmp_mode, rtx op0, rtx op1)
4568 rtx tem, trueop0, trueop1;
4570 if (cmp_mode == VOIDmode)
4571 cmp_mode = GET_MODE (op0);
4572 if (cmp_mode == VOIDmode)
4573 cmp_mode = GET_MODE (op1);
4575 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4576 if (tem)
4578 if (SCALAR_FLOAT_MODE_P (mode))
4580 if (tem == const0_rtx)
4581 return CONST0_RTX (mode);
4582 #ifdef FLOAT_STORE_FLAG_VALUE
4584 REAL_VALUE_TYPE val;
4585 val = FLOAT_STORE_FLAG_VALUE (mode);
4586 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4588 #else
4589 return NULL_RTX;
4590 #endif
4592 if (VECTOR_MODE_P (mode))
4594 if (tem == const0_rtx)
4595 return CONST0_RTX (mode);
4596 #ifdef VECTOR_STORE_FLAG_VALUE
4598 int i, units;
4599 rtvec v;
4601 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4602 if (val == NULL_RTX)
4603 return NULL_RTX;
4604 if (val == const1_rtx)
4605 return CONST1_RTX (mode);
4607 units = GET_MODE_NUNITS (mode);
4608 v = rtvec_alloc (units);
4609 for (i = 0; i < units; i++)
4610 RTVEC_ELT (v, i) = val;
4611 return gen_rtx_raw_CONST_VECTOR (mode, v);
4613 #else
4614 return NULL_RTX;
4615 #endif
4618 return tem;
4621 /* For the following tests, ensure const0_rtx is op1. */
4622 if (swap_commutative_operands_p (op0, op1)
4623 || (op0 == const0_rtx && op1 != const0_rtx))
4624 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4626 /* If op0 is a compare, extract the comparison arguments from it. */
4627 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4628 return simplify_gen_relational (code, mode, VOIDmode,
4629 XEXP (op0, 0), XEXP (op0, 1));
4631 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4632 || CC0_P (op0))
4633 return NULL_RTX;
4635 trueop0 = avoid_constant_pool_reference (op0);
4636 trueop1 = avoid_constant_pool_reference (op1);
4637 return simplify_relational_operation_1 (code, mode, cmp_mode,
4638 trueop0, trueop1);
4641 /* This part of simplify_relational_operation is only used when CMP_MODE
4642 is not in class MODE_CC (i.e. it is a real comparison).
4644 MODE is the mode of the result, while CMP_MODE specifies the mode
4645 in which the comparison is done, so it is the mode of the operands. */
4647 static rtx
4648 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4649 enum machine_mode cmp_mode, rtx op0, rtx op1)
4651 enum rtx_code op0code = GET_CODE (op0);
4653 if (op1 == const0_rtx && COMPARISON_P (op0))
4655 /* If op0 is a comparison, extract the comparison arguments
4656 from it. */
4657 if (code == NE)
4659 if (GET_MODE (op0) == mode)
4660 return simplify_rtx (op0);
4661 else
4662 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4663 XEXP (op0, 0), XEXP (op0, 1));
4665 else if (code == EQ)
4667 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4668 if (new_code != UNKNOWN)
4669 return simplify_gen_relational (new_code, mode, VOIDmode,
4670 XEXP (op0, 0), XEXP (op0, 1));
4674 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4675 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4676 if ((code == LTU || code == GEU)
4677 && GET_CODE (op0) == PLUS
4678 && CONST_INT_P (XEXP (op0, 1))
4679 && (rtx_equal_p (op1, XEXP (op0, 0))
4680 || rtx_equal_p (op1, XEXP (op0, 1)))
4681 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4682 && XEXP (op0, 1) != const0_rtx)
4684 rtx new_cmp
4685 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4686 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4687 cmp_mode, XEXP (op0, 0), new_cmp);
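/* E.g. the common unsigned overflow test (ltu (plus a 1) (const_int 1))
   becomes (geu a (const_int -1)), which is true exactly when the
   addition wrapped around.  */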
4690 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4691 if ((code == LTU || code == GEU)
4692 && GET_CODE (op0) == PLUS
4693 && rtx_equal_p (op1, XEXP (op0, 1))
4694 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4695 && !rtx_equal_p (op1, XEXP (op0, 0)))
4696 return simplify_gen_relational (code, mode, cmp_mode, op0,
4697 copy_rtx (XEXP (op0, 0)));
4699 if (op1 == const0_rtx)
4701 /* Canonicalize (GTU x 0) as (NE x 0). */
4702 if (code == GTU)
4703 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4704 /* Canonicalize (LEU x 0) as (EQ x 0). */
4705 if (code == LEU)
4706 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4708 else if (op1 == const1_rtx)
4710 switch (code)
4712 case GE:
4713 /* Canonicalize (GE x 1) as (GT x 0). */
4714 return simplify_gen_relational (GT, mode, cmp_mode,
4715 op0, const0_rtx);
4716 case GEU:
4717 /* Canonicalize (GEU x 1) as (NE x 0). */
4718 return simplify_gen_relational (NE, mode, cmp_mode,
4719 op0, const0_rtx);
4720 case LT:
4721 /* Canonicalize (LT x 1) as (LE x 0). */
4722 return simplify_gen_relational (LE, mode, cmp_mode,
4723 op0, const0_rtx);
4724 case LTU:
4725 /* Canonicalize (LTU x 1) as (EQ x 0). */
4726 return simplify_gen_relational (EQ, mode, cmp_mode,
4727 op0, const0_rtx);
4728 default:
4729 break;
4732 else if (op1 == constm1_rtx)
4734 /* Canonicalize (LE x -1) as (LT x 0). */
4735 if (code == LE)
4736 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4737 /* Canonicalize (GT x -1) as (GE x 0). */
4738 if (code == GT)
4739 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4742 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4743 if ((code == EQ || code == NE)
4744 && (op0code == PLUS || op0code == MINUS)
4745 && CONSTANT_P (op1)
4746 && CONSTANT_P (XEXP (op0, 1))
4747 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4749 rtx x = XEXP (op0, 0);
4750 rtx c = XEXP (op0, 1);
4751 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4752 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4754 /* Detect an infinite recursive condition, where we oscillate at this
4755 simplification case between:
4756 A + B == C <---> C - B == A,
4757 where A, B, and C are all constants with non-simplifiable expressions,
4758 usually SYMBOL_REFs. */
4759 if (GET_CODE (tem) == invcode
4760 && CONSTANT_P (x)
4761 && rtx_equal_p (c, XEXP (tem, 1)))
4762 return NULL_RTX;
4764 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4767 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4768 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4769 if (code == NE
4770 && op1 == const0_rtx
4771 && GET_MODE_CLASS (mode) == MODE_INT
4772 && cmp_mode != VOIDmode
4773 /* ??? Work-around BImode bugs in the ia64 backend. */
4774 && mode != BImode
4775 && cmp_mode != BImode
4776 && nonzero_bits (op0, cmp_mode) == 1
4777 && STORE_FLAG_VALUE == 1)
4778 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4779 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4780 : lowpart_subreg (mode, op0, cmp_mode);
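/* E.g. if nonzero_bits proves op0 is either 0 or 1, then
   (ne:SI op0 (const_int 0)) is just op0 itself, zero-extended or
   truncated to SImode as needed.  */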
4782 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4783 if ((code == EQ || code == NE)
4784 && op1 == const0_rtx
4785 && op0code == XOR)
4786 return simplify_gen_relational (code, mode, cmp_mode,
4787 XEXP (op0, 0), XEXP (op0, 1));
4789 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4790 if ((code == EQ || code == NE)
4791 && op0code == XOR
4792 && rtx_equal_p (XEXP (op0, 0), op1)
4793 && !side_effects_p (XEXP (op0, 0)))
4794 return simplify_gen_relational (code, mode, cmp_mode,
4795 XEXP (op0, 1), const0_rtx);
4797 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4798 if ((code == EQ || code == NE)
4799 && op0code == XOR
4800 && rtx_equal_p (XEXP (op0, 1), op1)
4801 && !side_effects_p (XEXP (op0, 1)))
4802 return simplify_gen_relational (code, mode, cmp_mode,
4803 XEXP (op0, 0), const0_rtx);
4805 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4806 if ((code == EQ || code == NE)
4807 && op0code == XOR
4808 && CONST_SCALAR_INT_P (op1)
4809 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4810 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4811 simplify_gen_binary (XOR, cmp_mode,
4812 XEXP (op0, 1), op1));
4814 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4815 if ((code == EQ || code == NE)
4816 && GET_CODE (op0) == BSWAP
4817 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1)))
4818 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4819 simplify_gen_unary (BSWAP, cmp_mode,
4820 op1, cmp_mode));
4822 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4823 if ((code == EQ || code == NE)
4824 && GET_CODE (op0) == BSWAP
4825 && GET_CODE (op1) == BSWAP)
4826 return simplify_gen_relational (code, mode, cmp_mode,
4827 XEXP (op0, 0), XEXP (op1, 0));
4829 if (op0code == POPCOUNT && op1 == const0_rtx)
4830 switch (code)
4832 case EQ:
4833 case LE:
4834 case LEU:
4835 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4836 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4837 XEXP (op0, 0), const0_rtx);
4839 case NE:
4840 case GT:
4841 case GTU:
4842 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4843 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4844 XEXP (op0, 0), const0_rtx);
4846 default:
4847 break;
4850 return NULL_RTX;
4853 enum
4855 CMP_EQ = 1,
4856 CMP_LT = 2,
4857 CMP_GT = 4,
4858 CMP_LTU = 8,
4859 CMP_GTU = 16
4863 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4864 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4865 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4866 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4867 For floating-point comparisons, assume that the operands were ordered. */
4869 static rtx
4870 comparison_result (enum rtx_code code, int known_results)
4872 switch (code)
4874 case EQ:
4875 case UNEQ:
4876 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4877 case NE:
4878 case LTGT:
4879 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4881 case LT:
4882 case UNLT:
4883 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4884 case GE:
4885 case UNGE:
4886 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4888 case GT:
4889 case UNGT:
4890 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4891 case LE:
4892 case UNLE:
4893 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4895 case LTU:
4896 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4897 case GEU:
4898 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4900 case GTU:
4901 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4902 case LEU:
4903 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4905 case ORDERED:
4906 return const_true_rtx;
4907 case UNORDERED:
4908 return const0_rtx;
4909 default:
4910 gcc_unreachable ();
4914 /* Check if the given comparison (done in the given MODE) is actually a
4915 tautology or a contradiction.
4916 If no simplification is possible, this function returns zero.
4917 Otherwise, it returns either const_true_rtx or const0_rtx. */
4920 simplify_const_relational_operation (enum rtx_code code,
4921 enum machine_mode mode,
4922 rtx op0, rtx op1)
4924 rtx tem;
4925 rtx trueop0;
4926 rtx trueop1;
4928 gcc_assert (mode != VOIDmode
4929 || (GET_MODE (op0) == VOIDmode
4930 && GET_MODE (op1) == VOIDmode));
4932 /* If op0 is a compare, extract the comparison arguments from it. */
4933 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4935 op1 = XEXP (op0, 1);
4936 op0 = XEXP (op0, 0);
4938 if (GET_MODE (op0) != VOIDmode)
4939 mode = GET_MODE (op0);
4940 else if (GET_MODE (op1) != VOIDmode)
4941 mode = GET_MODE (op1);
4942 else
4943 return 0;
4946 /* We can't simplify MODE_CC values since we don't know what the
4947 actual comparison is. */
4948 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4949 return 0;
4951 /* Make sure the constant is second. */
4952 if (swap_commutative_operands_p (op0, op1))
4954 tem = op0, op0 = op1, op1 = tem;
4955 code = swap_condition (code);
4958 trueop0 = avoid_constant_pool_reference (op0);
4959 trueop1 = avoid_constant_pool_reference (op1);
4961 /* For integer comparisons of A and B maybe we can simplify A - B and can
4962 then simplify a comparison of that with zero. If A and B are both either
4963 a register or a CONST_INT, this can't help; testing for these cases will
4964 prevent infinite recursion here and speed things up.
4966 We can only do this for EQ and NE comparisons as otherwise we may
4967 lose or introduce overflow, which we cannot disregard as undefined
4968 because we do not know the signedness of the operation on either the left or
4969 the right hand side of the comparison. */
4971 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4972 && (code == EQ || code == NE)
4973 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4974 && (REG_P (op1) || CONST_INT_P (trueop1)))
4975 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4976 /* We cannot do this if tem is a nonzero address. */
4977 && ! nonzero_address_p (tem))
4978 return simplify_const_relational_operation (signed_condition (code),
4979 mode, tem, const0_rtx);
4981 if (! HONOR_NANS (mode) && code == ORDERED)
4982 return const_true_rtx;
4984 if (! HONOR_NANS (mode) && code == UNORDERED)
4985 return const0_rtx;
4987 /* For modes without NaNs, if the two operands are equal, we know the
4988 result except if they have side-effects. Even with NaNs we know
4989 the result of unordered comparisons and, if signaling NaNs are
4990 irrelevant, also the result of LT/GT/LTGT. */
4991 if ((! HONOR_NANS (GET_MODE (trueop0))
4992 || code == UNEQ || code == UNLE || code == UNGE
4993 || ((code == LT || code == GT || code == LTGT)
4994 && ! HONOR_SNANS (GET_MODE (trueop0))))
4995 && rtx_equal_p (trueop0, trueop1)
4996 && ! side_effects_p (trueop0))
4997 return comparison_result (code, CMP_EQ);
4999 /* If the operands are floating-point constants, see if we can fold
5000 the result. */
5001 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5002 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5003 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5005 REAL_VALUE_TYPE d0, d1;
5007 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5008 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5010 /* Comparisons are unordered iff at least one of the values is NaN. */
5011 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5012 switch (code)
5014 case UNEQ:
5015 case UNLT:
5016 case UNGT:
5017 case UNLE:
5018 case UNGE:
5019 case NE:
5020 case UNORDERED:
5021 return const_true_rtx;
5022 case EQ:
5023 case LT:
5024 case GT:
5025 case LE:
5026 case GE:
5027 case LTGT:
5028 case ORDERED:
5029 return const0_rtx;
5030 default:
5031 return 0;
5034 return comparison_result (code,
5035 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5036 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5039 /* Otherwise, see if the operands are both integers. */
5040 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5041 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5042 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5044 int width = GET_MODE_PRECISION (mode);
5045 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5046 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5048 /* Get the two words comprising each integer constant. */
5049 if (CONST_DOUBLE_AS_INT_P (trueop0))
5051 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5052 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5054 else
5056 l0u = l0s = INTVAL (trueop0);
5057 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5060 if (CONST_DOUBLE_AS_INT_P (trueop1))
5062 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5063 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5065 else
5067 l1u = l1s = INTVAL (trueop1);
5068 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5071 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5072 we have to sign or zero-extend the values. */
5073 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5075 l0u &= GET_MODE_MASK (mode);
5076 l1u &= GET_MODE_MASK (mode);
5078 if (val_signbit_known_set_p (mode, l0s))
5079 l0s |= ~GET_MODE_MASK (mode);
5081 if (val_signbit_known_set_p (mode, l1s))
5082 l1s |= ~GET_MODE_MASK (mode);
5084 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5085 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5087 if (h0u == h1u && l0u == l1u)
5088 return comparison_result (code, CMP_EQ);
5089 else
5091 int cr;
5092 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5093 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5094 return comparison_result (code, cr);
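/* The signed ordering compares the high words as signed and, on a
   tie, the low words as unsigned; the unsigned ordering compares
   both words as unsigned.  */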
5098 /* Optimize comparisons with upper and lower bounds. */
5099 if (HWI_COMPUTABLE_MODE_P (mode)
5100 && CONST_INT_P (trueop1))
5102 int sign;
5103 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5104 HOST_WIDE_INT val = INTVAL (trueop1);
5105 HOST_WIDE_INT mmin, mmax;
5107 if (code == GEU
5108 || code == LEU
5109 || code == GTU
5110 || code == LTU)
5111 sign = 0;
5112 else
5113 sign = 1;
5115 /* Get a reduced range if the sign bit is zero. */
5116 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5118 mmin = 0;
5119 mmax = nonzero;
5121 else
5123 rtx mmin_rtx, mmax_rtx;
5124 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5126 mmin = INTVAL (mmin_rtx);
5127 mmax = INTVAL (mmax_rtx);
5128 if (sign)
5130 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5132 mmin >>= (sign_copies - 1);
5133 mmax >>= (sign_copies - 1);
5137 switch (code)
5139 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5140 case GEU:
5141 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5142 return const_true_rtx;
5143 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5144 return const0_rtx;
5145 break;
5146 case GE:
5147 if (val <= mmin)
5148 return const_true_rtx;
5149 if (val > mmax)
5150 return const0_rtx;
5151 break;
5153 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5154 case LEU:
5155 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5156 return const_true_rtx;
5157 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5158 return const0_rtx;
5159 break;
5160 case LE:
5161 if (val >= mmax)
5162 return const_true_rtx;
5163 if (val < mmin)
5164 return const0_rtx;
5165 break;
5167 case EQ:
5168 /* x == y is always false for y out of range. */
5169 if (val < mmin || val > mmax)
5170 return const0_rtx;
5171 break;
5173 /* x > y is always false for y >= mmax, always true for y < mmin. */
5174 case GTU:
5175 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5176 return const0_rtx;
5177 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5178 return const_true_rtx;
5179 break;
5180 case GT:
5181 if (val >= mmax)
5182 return const0_rtx;
5183 if (val < mmin)
5184 return const_true_rtx;
5185 break;
5187 /* x < y is always false for y <= mmin, always true for y > mmax. */
5188 case LTU:
5189 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5190 return const0_rtx;
5191 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5192 return const_true_rtx;
5193 break;
5194 case LT:
5195 if (val <= mmin)
5196 return const0_rtx;
5197 if (val > mmax)
5198 return const_true_rtx;
5199 break;
5201 case NE:
5202 /* x != y is always true for y out of range. */
5203 if (val < mmin || val > mmax)
5204 return const_true_rtx;
5205 break;
5207 default:
5208 break;
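/* E.g. if nonzero_bits shows that only the low 8 bits of trueop0
   can be set, mmax is 255 and (gtu trueop0 (const_int 255)) folds
   to const0_rtx here.  */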
5212 /* Optimize integer comparisons with zero. */
5213 if (trueop1 == const0_rtx)
5215 /* Some addresses are known to be nonzero. We don't know
5216 their sign, but equality comparisons are known. */
5217 if (nonzero_address_p (trueop0))
5219 if (code == EQ || code == LEU)
5220 return const0_rtx;
5221 if (code == NE || code == GTU)
5222 return const_true_rtx;
5225 /* See if the first operand is an IOR with a constant. If so, we
5226 may be able to determine the result of this comparison. */
5227 if (GET_CODE (op0) == IOR)
5229 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5230 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5232 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5233 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5234 && (UINTVAL (inner_const)
5235 & ((unsigned HOST_WIDE_INT) 1
5236 << sign_bitnum)));
5238 switch (code)
5240 case EQ:
5241 case LEU:
5242 return const0_rtx;
5243 case NE:
5244 case GTU:
5245 return const_true_rtx;
5246 case LT:
5247 case LE:
5248 if (has_sign)
5249 return const_true_rtx;
5250 break;
5251 case GT:
5252 case GE:
5253 if (has_sign)
5254 return const0_rtx;
5255 break;
5256 default:
5257 break;
5263 /* Optimize comparison of ABS with zero. */
5264 if (trueop1 == CONST0_RTX (mode)
5265 && (GET_CODE (trueop0) == ABS
5266 || (GET_CODE (trueop0) == FLOAT_EXTEND
5267 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5269 switch (code)
5271 case LT:
5272 /* Optimize abs(x) < 0.0. */
5273 if (!HONOR_SNANS (mode)
5274 && (!INTEGRAL_MODE_P (mode)
5275 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5277 if (INTEGRAL_MODE_P (mode)
5278 && (issue_strict_overflow_warning
5279 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5280 warning (OPT_Wstrict_overflow,
5281 ("assuming signed overflow does not occur when "
5282 "assuming abs (x) < 0 is false"));
5283 return const0_rtx;
5285 break;
5287 case GE:
5288 /* Optimize abs(x) >= 0.0. */
5289 if (!HONOR_NANS (mode)
5290 && (!INTEGRAL_MODE_P (mode)
5291 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5293 if (INTEGRAL_MODE_P (mode)
5294 && (issue_strict_overflow_warning
5295 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5296 warning (OPT_Wstrict_overflow,
5297 ("assuming signed overflow does not occur when "
5298 "assuming abs (x) >= 0 is true"));
5299 return const_true_rtx;
5301 break;
5303 case UNGE:
5304 /* Optimize ! (abs(x) < 0.0). */
5305 return const_true_rtx;
5307 default:
5308 break;
5312 return 0;
5315 /* Simplify CODE, an operation with result mode MODE and three operands,
5316 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5317 a constant. Return 0 if no simplification is possible. */
5320 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5321 enum machine_mode op0_mode, rtx op0, rtx op1,
5322 rtx op2)
5324 unsigned int width = GET_MODE_PRECISION (mode);
5325 bool any_change = false;
5326 rtx tem, trueop2;
5328 /* VOIDmode means "infinite" precision. */
5329 if (width == 0)
5330 width = HOST_BITS_PER_WIDE_INT;
5332 switch (code)
5334 case FMA:
5335 /* Simplify negations around the multiplication. */
5336 /* -a * -b + c => a * b + c. */
5337 if (GET_CODE (op0) == NEG)
5339 tem = simplify_unary_operation (NEG, mode, op1, mode);
5340 if (tem)
5341 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5343 else if (GET_CODE (op1) == NEG)
5345 tem = simplify_unary_operation (NEG, mode, op0, mode);
5346 if (tem)
5347 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5350 /* Canonicalize the two multiplication operands. */
5351 /* a * -b + c => -b * a + c. */
5352 if (swap_commutative_operands_p (op0, op1))
5353 tem = op0, op0 = op1, op1 = tem, any_change = true;
5355 if (any_change)
5356 return gen_rtx_FMA (mode, op0, op1, op2);
5357 return NULL_RTX;
5359 case SIGN_EXTRACT:
5360 case ZERO_EXTRACT:
5361 if (CONST_INT_P (op0)
5362 && CONST_INT_P (op1)
5363 && CONST_INT_P (op2)
5364 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5365 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5367 /* Extracting a bit-field from a constant. */
5368 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5369 HOST_WIDE_INT op1val = INTVAL (op1);
5370 HOST_WIDE_INT op2val = INTVAL (op2);
5371 if (BITS_BIG_ENDIAN)
5372 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5373 else
5374 val >>= op2val;
5376 if (HOST_BITS_PER_WIDE_INT != op1val)
5378 /* First zero-extend. */
5379 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5380 /* If desired, propagate sign bit. */
5381 if (code == SIGN_EXTRACT
5382 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5383 != 0)
5384 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5387 return gen_int_mode (val, mode);
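/* E.g. (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   yields 0xa when !BITS_BIG_ENDIAN: shift right by the bit
   position, then mask down to the field width.  */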
5389 break;
5391 case IF_THEN_ELSE:
5392 if (CONST_INT_P (op0))
5393 return op0 != const0_rtx ? op1 : op2;
5395 /* Convert c ? a : a into "a". */
5396 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5397 return op1;
5399 /* Convert a != b ? a : b into "a". */
5400 if (GET_CODE (op0) == NE
5401 && ! side_effects_p (op0)
5402 && ! HONOR_NANS (mode)
5403 && ! HONOR_SIGNED_ZEROS (mode)
5404 && ((rtx_equal_p (XEXP (op0, 0), op1)
5405 && rtx_equal_p (XEXP (op0, 1), op2))
5406 || (rtx_equal_p (XEXP (op0, 0), op2)
5407 && rtx_equal_p (XEXP (op0, 1), op1))))
5408 return op1;
5410 /* Convert a == b ? a : b into "b". */
5411 if (GET_CODE (op0) == EQ
5412 && ! side_effects_p (op0)
5413 && ! HONOR_NANS (mode)
5414 && ! HONOR_SIGNED_ZEROS (mode)
5415 && ((rtx_equal_p (XEXP (op0, 0), op1)
5416 && rtx_equal_p (XEXP (op0, 1), op2))
5417 || (rtx_equal_p (XEXP (op0, 0), op2)
5418 && rtx_equal_p (XEXP (op0, 1), op1))))
5419 return op2;
5421 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5423 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5424 ? GET_MODE (XEXP (op0, 1))
5425 : GET_MODE (XEXP (op0, 0)));
5426 rtx temp;
5428 /* Look for happy constants in op1 and op2. */
5429 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5431 HOST_WIDE_INT t = INTVAL (op1);
5432 HOST_WIDE_INT f = INTVAL (op2);
5434 if (t == STORE_FLAG_VALUE && f == 0)
5435 code = GET_CODE (op0);
5436 else if (t == 0 && f == STORE_FLAG_VALUE)
5438 enum rtx_code tmp;
5439 tmp = reversed_comparison_code (op0, NULL_RTX);
5440 if (tmp == UNKNOWN)
5441 break;
5442 code = tmp;
5444 else
5445 break;
5447 return simplify_gen_relational (code, mode, cmp_mode,
5448 XEXP (op0, 0), XEXP (op0, 1));
5451 if (cmp_mode == VOIDmode)
5452 cmp_mode = op0_mode;
5453 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5454 cmp_mode, XEXP (op0, 0),
5455 XEXP (op0, 1));
5457 /* See if any simplifications were possible. */
5458 if (temp)
5460 if (CONST_INT_P (temp))
5461 return temp == const0_rtx ? op2 : op1;
5462 else
5463 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5466 break;
5468 case VEC_MERGE:
5469 gcc_assert (GET_MODE (op0) == mode);
5470 gcc_assert (GET_MODE (op1) == mode);
5471 gcc_assert (VECTOR_MODE_P (mode));
5472 trueop2 = avoid_constant_pool_reference (op2);
5473 if (CONST_INT_P (trueop2))
5475 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5476 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5477 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5478 unsigned HOST_WIDE_INT mask;
5479 if (n_elts == HOST_BITS_PER_WIDE_INT)
5480 mask = -1;
5481 else
5482 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5484 if (!(sel & mask) && !side_effects_p (op0))
5485 return op1;
5486 if ((sel & mask) == mask && !side_effects_p (op1))
5487 return op0;
5489 rtx trueop0 = avoid_constant_pool_reference (op0);
5490 rtx trueop1 = avoid_constant_pool_reference (op1);
5491 if (GET_CODE (trueop0) == CONST_VECTOR
5492 && GET_CODE (trueop1) == CONST_VECTOR)
5494 rtvec v = rtvec_alloc (n_elts);
5495 unsigned int i;
5497 for (i = 0; i < n_elts; i++)
5498 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5499 ? CONST_VECTOR_ELT (trueop0, i)
5500 : CONST_VECTOR_ELT (trueop1, i));
5501 return gen_rtx_CONST_VECTOR (mode, v);
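/* A set bit I in the selector picks element I from op0, a clear
   bit picks it from op1; e.g. (vec_merge [1 2] [3 4] (const_int 2))
   folds to the constant vector [3 2].  */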
5504 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5505 if no element from a appears in the result. */
5506 if (GET_CODE (op0) == VEC_MERGE)
5508 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5509 if (CONST_INT_P (tem))
5511 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5512 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5513 return simplify_gen_ternary (code, mode, mode,
5514 XEXP (op0, 1), op1, op2);
5515 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5516 return simplify_gen_ternary (code, mode, mode,
5517 XEXP (op0, 0), op1, op2);
5520 if (GET_CODE (op1) == VEC_MERGE)
5522 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5523 if (CONST_INT_P (tem))
5525 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5526 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5527 return simplify_gen_ternary (code, mode, mode,
5528 op0, XEXP (op1, 1), op2);
5529 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5530 return simplify_gen_ternary (code, mode, mode,
5531 op0, XEXP (op1, 0), op2);
5536 if (rtx_equal_p (op0, op1)
5537 && !side_effects_p (op2) && !side_effects_p (op1))
5538 return op0;
5540 break;
5542 default:
5543 gcc_unreachable ();
5546 return 0;
5549 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5550 or CONST_VECTOR,
5551 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5553 Works by unpacking OP into a collection of 8-bit values
5554 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5555 and then repacking them again for OUTERMODE. */
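/* For instance, a DImode constant seen through (subreg:SI ... 4) is
   unpacked into eight bytes, the four bytes selected by BYTE
   (adjusted for the target's endianness) are picked out, and a new
   SImode constant is assembled from them.  */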
5557 static rtx
5558 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5559 enum machine_mode innermode, unsigned int byte)
5561 /* We support up to 512-bit values (for V8DFmode). */
5562 enum {
5563 max_bitsize = 512,
5564 value_bit = 8,
5565 value_mask = (1 << value_bit) - 1
5567 unsigned char value[max_bitsize / value_bit];
5568 int value_start;
5569 int i;
5570 int elem;
5572 int num_elem;
5573 rtx * elems;
5574 int elem_bitsize;
5575 rtx result_s;
5576 rtvec result_v = NULL;
5577 enum mode_class outer_class;
5578 enum machine_mode outer_submode;
5580 /* Some ports misuse CCmode. */
5581 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5582 return op;
5584 /* We have no way to represent a complex constant at the rtl level. */
5585 if (COMPLEX_MODE_P (outermode))
5586 return NULL_RTX;
5588 /* Unpack the value. */
5590 if (GET_CODE (op) == CONST_VECTOR)
5592 num_elem = CONST_VECTOR_NUNITS (op);
5593 elems = &CONST_VECTOR_ELT (op, 0);
5594 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5596 else
5598 num_elem = 1;
5599 elems = &op;
5600 elem_bitsize = max_bitsize;
5602 /* If this asserts, it is too complicated; reducing value_bit may help. */
5603 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5604 /* I don't know how to handle endianness of sub-units. */
5605 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5607 for (elem = 0; elem < num_elem; elem++)
5609 unsigned char * vp;
5610 rtx el = elems[elem];
5612 /* Vectors are kept in target memory order. (This is probably
5613 a mistake.) */
5615 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5616 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5617 / BITS_PER_UNIT);
5618 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5619 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5620 unsigned bytele = (subword_byte % UNITS_PER_WORD
5621 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5622 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5625 switch (GET_CODE (el))
5627 case CONST_INT:
5628 for (i = 0;
5629 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5630 i += value_bit)
5631 *vp++ = INTVAL (el) >> i;
5632 /* CONST_INTs are always logically sign-extended. */
5633 for (; i < elem_bitsize; i += value_bit)
5634 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5635 break;
5637 case CONST_DOUBLE:
5638 if (GET_MODE (el) == VOIDmode)
5640 unsigned char extend = 0;
5641 /* If this triggers, someone should have generated a
5642 CONST_INT instead. */
5643 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5645 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5646 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5647 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5649 *vp++
5650 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5651 i += value_bit;
5654 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5655 extend = -1;
5656 for (; i < elem_bitsize; i += value_bit)
5657 *vp++ = extend;
5659 else
5661 long tmp[max_bitsize / 32];
5662 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5664 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5665 gcc_assert (bitsize <= elem_bitsize);
5666 gcc_assert (bitsize % value_bit == 0);
5668 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5669 GET_MODE (el));
5671 /* real_to_target produces its result in words affected by
5672 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5673 and use WORDS_BIG_ENDIAN instead; see the documentation
5674 of SUBREG in rtl.texi. */
5675 for (i = 0; i < bitsize; i += value_bit)
5677 int ibase;
5678 if (WORDS_BIG_ENDIAN)
5679 ibase = bitsize - 1 - i;
5680 else
5681 ibase = i;
5682 *vp++ = tmp[ibase / 32] >> i % 32;
5685 /* It shouldn't matter what's done here, so fill it with
5686 zero. */
5687 for (; i < elem_bitsize; i += value_bit)
5688 *vp++ = 0;
5690 break;
5692 case CONST_FIXED:
5693 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5695 for (i = 0; i < elem_bitsize; i += value_bit)
5696 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5698 else
5700 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5701 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5702 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5703 i += value_bit)
5704 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5705 >> (i - HOST_BITS_PER_WIDE_INT);
5706 for (; i < elem_bitsize; i += value_bit)
5707 *vp++ = 0;
5709 break;
5711 default:
5712 gcc_unreachable ();
5716 /* Now, pick the right byte to start with. */
5717 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5718 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5719 will already have offset 0. */
5720 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5722 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5723 - byte);
5724 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5725 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5726 byte = (subword_byte % UNITS_PER_WORD
5727 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5730 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5731 so if it's become negative it will instead be very large.) */
5732 gcc_assert (byte < GET_MODE_SIZE (innermode));
5734 /* Convert from bytes to chunks of size value_bit. */
5735 value_start = byte * (BITS_PER_UNIT / value_bit);
5737 /* Re-pack the value. */
5739 if (VECTOR_MODE_P (outermode))
5741 num_elem = GET_MODE_NUNITS (outermode);
5742 result_v = rtvec_alloc (num_elem);
5743 elems = &RTVEC_ELT (result_v, 0);
5744 outer_submode = GET_MODE_INNER (outermode);
5746 else
5748 num_elem = 1;
5749 elems = &result_s;
5750 outer_submode = outermode;
5753 outer_class = GET_MODE_CLASS (outer_submode);
5754 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5756 gcc_assert (elem_bitsize % value_bit == 0);
5757 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5759 for (elem = 0; elem < num_elem; elem++)
5761 unsigned char *vp;
5763 /* Vectors are stored in target memory order. (This is probably
5764 a mistake.) */
5766 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5767 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5768 / BITS_PER_UNIT);
5769 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5770 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5771 unsigned bytele = (subword_byte % UNITS_PER_WORD
5772 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5773 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5776 switch (outer_class)
5778 case MODE_INT:
5779 case MODE_PARTIAL_INT:
5781 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5783 for (i = 0;
5784 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5785 i += value_bit)
5786 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5787 for (; i < elem_bitsize; i += value_bit)
5788 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5789 << (i - HOST_BITS_PER_WIDE_INT);
5791 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5792 know why. */
5793 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5794 elems[elem] = gen_int_mode (lo, outer_submode);
5795 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5796 elems[elem] = immed_double_const (lo, hi, outer_submode);
5797 else
5798 return NULL_RTX;
5800 break;
5802 case MODE_FLOAT:
5803 case MODE_DECIMAL_FLOAT:
5805 REAL_VALUE_TYPE r;
5806 long tmp[max_bitsize / 32];
5808 /* real_from_target wants its input in words affected by
5809 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5810 and use WORDS_BIG_ENDIAN instead; see the documentation
5811 of SUBREG in rtl.texi. */
5812 for (i = 0; i < max_bitsize / 32; i++)
5813 tmp[i] = 0;
5814 for (i = 0; i < elem_bitsize; i += value_bit)
5816 int ibase;
5817 if (WORDS_BIG_ENDIAN)
5818 ibase = elem_bitsize - 1 - i;
5819 else
5820 ibase = i;
5821 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5824 real_from_target (&r, tmp, outer_submode);
5825 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5827 break;
5829 case MODE_FRACT:
5830 case MODE_UFRACT:
5831 case MODE_ACCUM:
5832 case MODE_UACCUM:
5834 FIXED_VALUE_TYPE f;
5835 f.data.low = 0;
5836 f.data.high = 0;
5837 f.mode = outer_submode;
5839 for (i = 0;
5840 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5841 i += value_bit)
5842 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5843 for (; i < elem_bitsize; i += value_bit)
5844 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5845 << (i - HOST_BITS_PER_WIDE_INT));
5847 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5849 break;
5851 default:
5852 gcc_unreachable ();
5855 if (VECTOR_MODE_P (outermode))
5856 return gen_rtx_CONST_VECTOR (outermode, result_v);
5857 else
5858 return result_s;
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
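  /* For instance, the constant dispatch above folds
     (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE to
     (const_int 0x5678) on a little-endian target (a hypothetical
     illustration; the selected bits depend on endianness).  */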
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big-endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
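      /* As a hypothetical illustration: when the requested subreg is
         paradoxical, e.g. DImode over SImode with BYTE == 0 and
         UNITS_PER_WORD == 4 on a word- and byte-big-endian target,
         difference == -4 and FINAL_OFFSET is adjusted by -4, recovering
         the negative offset that the SUBREG_BYTE convention hides.  */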
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
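  /* A hypothetical illustration of the folding above:
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses in a single step to
     (subreg:QI (reg:SI R) 0) on a little-endian target.  */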
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok a partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
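  /* For instance, on a hypothetical target where hard registers 0 and 1
     hold the low and high words of a DImode value, (subreg:SI (reg:DI 0) 4)
     folds straight to (reg:SI 1), with simplify_subreg_regno choosing the
     register number.  */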
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
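  /* E.g. (subreg:QI (mem:SI ADDR) 0) becomes (mem:QI ADDR) here, with
     adjust_address_nv folding the byte offset into the address (ADDR
     being a hypothetical, non-mode-dependent address).  */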
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
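  /* As an illustration: for (concat:SC (reg:SF RE) (reg:SF IM)) with a
     4-byte SFmode, (subreg:SF ... 0) selects RE and (subreg:SF ... 4)
     selects IM (RE and IM being hypothetical operands).  */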
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
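  /* For instance, (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only
     bits supplied by the zero extension on a little-endian target and
     folds to (const_int 0).  */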
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }
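  /* This treats a lowpart SUBREG like a TRUNCATE: e.g. a lowpart SImode
     subreg of a DImode shift may be rewritten as a narrower operation
     when simplify_truncation recognizes the pattern (the exact outcome
     depends on that routine's rules).  */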
  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
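/* A minimal usage sketch for simplify_gen_subreg (hypothetical caller
   code; X is assumed to hold an SImode value):

     rtx lowpart = simplify_gen_subreg (HImode, x, SImode,
                                        subreg_lowpart_offset (HImode,
                                                               SImode));

   Unlike calling gen_rtx_SUBREG directly, this folds constants and nested
   subregs and returns NULL_RTX when no valid SUBREG can be formed.  */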
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not rely on pass-dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
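/* A minimal usage sketch for simplify_rtx (hypothetical caller code):

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   i.e. attempt a context-free fold of X and keep the original expression
   when no simplification applies.  */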