gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
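/* For example (illustration only, assuming the usual two's-complement
   HOST_WIDE_INT): HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 (all ones)
   and HWI_SIGN_EXTEND (5) is 0, i.e. the macro supplies the high half of
   a (low, high) pair as if LOW had been sign extended.  */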
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
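/* For example (illustration only, assuming a 32-bit SImode): the only
   SImode value accepted here is 0x80000000, a constant whose sole set
   bit is the sign bit of the mode.  */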
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
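/* For example (illustration only): because constants are swapped to the
   second operand of commutative codes, a call such as
   simplify_gen_binary (PLUS, SImode, const1_rtx, reg) that does not fold
   comes back as (plus:SI reg (const_int 1)) rather than the reverse.  */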
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
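/* For example (illustration only): given a MEM whose address is a
   SYMBOL_REF into the constant pool, say the pool entry holding the
   SFmode constant 1.5, the function above returns that CONST_DOUBLE
   directly, so later folding sees the value rather than a memory load.  */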
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do so if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 rtx X; if it returns non-NULL, replace X with its return value and
410 simplify the result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the truncation.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if MODE is larger
629 than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
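/* For example (illustration only, assuming a little-endian target with
   32-bit words): (truncate:SI (lshiftrt:DI (reg:DI) (const_int 32)))
   passes the test above and becomes (subreg:SI (reg:DI) 4), i.e. the
   upper word, since subreg_lowpart_offset is 0 and the shift contributes
   32 / BITS_PER_UNIT = 4 bytes.  */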
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
861 if (STORE_FLAG_VALUE == -1
862 && GET_CODE (op) == ASHIFTRT
863 && CONST_INT_P (XEXP (op, 1))
864 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
865 return simplify_gen_relational (GE, mode, VOIDmode,
866 XEXP (op, 0), const0_rtx);
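/* For example (illustration only): (ashiftrt foo (w-1)) is all ones when
   FOO is negative and zero otherwise, so its NOT is zero for negative FOO
   and all ones for non-negative FOO -- exactly the value of
   (ge foo (const_int 0)) when STORE_FLAG_VALUE is -1.  */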
869 if (GET_CODE (op) == SUBREG
870 && subreg_lowpart_p (op)
871 && (GET_MODE_SIZE (GET_MODE (op))
872 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
873 && GET_CODE (SUBREG_REG (op)) == ASHIFT
874 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
876 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
877 rtx x;
879 x = gen_rtx_ROTATE (inner_mode,
880 simplify_gen_unary (NOT, inner_mode, const1_rtx,
881 inner_mode),
882 XEXP (SUBREG_REG (op), 1));
883 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
884 if (temp)
885 return temp;
888 /* Apply De Morgan's laws to reduce number of patterns for machines
889 with negating logical insns (and-not, nand, etc.). If result has
890 only one NOT, put it first, since that is how the patterns are
891 coded. */
892 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
894 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
895 enum machine_mode op_mode;
897 op_mode = GET_MODE (in1);
898 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
900 op_mode = GET_MODE (in2);
901 if (op_mode == VOIDmode)
902 op_mode = mode;
903 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
905 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
907 rtx tem = in2;
908 in2 = in1; in1 = tem;
911 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
912 mode, in1, in2);
915 /* (not (bswap x)) -> (bswap (not x)). */
916 if (GET_CODE (op) == BSWAP)
918 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
919 return simplify_gen_unary (BSWAP, mode, x, mode);
921 break;
923 case NEG:
924 /* (neg (neg X)) == X. */
925 if (GET_CODE (op) == NEG)
926 return XEXP (op, 0);
928 /* (neg (plus X 1)) can become (not X). */
929 if (GET_CODE (op) == PLUS
930 && XEXP (op, 1) == const1_rtx)
931 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
933 /* Similarly, (neg (not X)) is (plus X 1). */
934 if (GET_CODE (op) == NOT)
935 return plus_constant (mode, XEXP (op, 0), 1);
937 /* (neg (minus X Y)) can become (minus Y X). This transformation
938 isn't safe for modes with signed zeros, since if X and Y are
939 both +0, (minus Y X) is +0 while (neg (minus X Y)) is -0. If the
940 rounding mode is towards +infinity (or -infinity) then the two
941 expressions will be rounded differently. */
942 if (GET_CODE (op) == MINUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
947 if (GET_CODE (op) == PLUS
948 && !HONOR_SIGNED_ZEROS (mode)
949 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
951 /* (neg (plus A C)) is simplified to (minus -C A). */
952 if (CONST_SCALAR_INT_P (XEXP (op, 1))
953 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
955 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
956 if (temp)
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
960 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
961 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
962 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
965 /* (neg (mult A B)) becomes (mult A (neg B)).
966 This works even for floating-point values. */
967 if (GET_CODE (op) == MULT
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
971 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
974 /* NEG commutes with ASHIFT since it is multiplication. Only do
975 this if we can then eliminate the NEG (e.g., if the operand
976 is a constant). */
977 if (GET_CODE (op) == ASHIFT)
979 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
980 if (temp)
981 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
984 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
985 C is equal to the width of MODE minus 1. */
986 if (GET_CODE (op) == ASHIFTRT
987 && CONST_INT_P (XEXP (op, 1))
988 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
989 return simplify_gen_binary (LSHIFTRT, mode,
990 XEXP (op, 0), XEXP (op, 1));
992 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
993 C is equal to the width of MODE minus 1. */
994 if (GET_CODE (op) == LSHIFTRT
995 && CONST_INT_P (XEXP (op, 1))
996 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 return simplify_gen_binary (ASHIFTRT, mode,
998 XEXP (op, 0), XEXP (op, 1));
1000 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1001 if (GET_CODE (op) == XOR
1002 && XEXP (op, 1) == const1_rtx
1003 && nonzero_bits (XEXP (op, 0), mode) == 1)
1004 return plus_constant (mode, XEXP (op, 0), -1);
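/* For example (illustration only): with A restricted to 0 or 1,
   (neg (xor A 1)) is -1 when A is 0 and 0 when A is 1, which is exactly
   A - 1, hence the (plus A -1) form above.  */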
1006 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1007 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1008 if (GET_CODE (op) == LT
1009 && XEXP (op, 1) == const0_rtx
1010 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1012 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1013 int isize = GET_MODE_PRECISION (inner);
1014 if (STORE_FLAG_VALUE == 1)
1016 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1017 GEN_INT (isize - 1));
1018 if (mode == inner)
1019 return temp;
1020 if (GET_MODE_PRECISION (mode) > isize)
1021 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1022 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1024 else if (STORE_FLAG_VALUE == -1)
1026 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1027 GEN_INT (isize - 1));
1028 if (mode == inner)
1029 return temp;
1030 if (GET_MODE_PRECISION (mode) > isize)
1031 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1032 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1035 break;
1037 case TRUNCATE:
1038 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1039 with the umulXi3_highpart patterns. */
1040 if (GET_CODE (op) == LSHIFTRT
1041 && GET_CODE (XEXP (op, 0)) == MULT)
1042 break;
1044 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1046 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1048 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1049 if (temp)
1050 return temp;
1052 /* We can't handle truncation to a partial integer mode here
1053 because we don't know the real bitsize of the partial
1054 integer mode. */
1055 break;
1058 if (GET_MODE (op) != VOIDmode)
1060 temp = simplify_truncation (mode, op, GET_MODE (op));
1061 if (temp)
1062 return temp;
1065 /* If we know that the value is already truncated, we can
1066 replace the TRUNCATE with a SUBREG. */
1067 if (GET_MODE_NUNITS (mode) == 1
1068 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1069 || truncated_to_mode (mode, op)))
1071 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 if (temp)
1073 return temp;
1076 /* A truncate of a comparison can be replaced with a subreg if
1077 STORE_FLAG_VALUE permits. This is like the previous test,
1078 but it works even if the comparison is done in a mode larger
1079 than HOST_BITS_PER_WIDE_INT. */
1080 if (HWI_COMPUTABLE_MODE_P (mode)
1081 && COMPARISON_P (op)
1082 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1084 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1085 if (temp)
1086 return temp;
1089 /* A truncate of a memory is just loading the low part of the memory
1090 if we are not changing the meaning of the address. */
1091 if (GET_CODE (op) == MEM
1092 && !VECTOR_MODE_P (mode)
1093 && !MEM_VOLATILE_P (op)
1094 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 break;
1103 case FLOAT_TRUNCATE:
1104 if (DECIMAL_FLOAT_MODE_P (mode))
1105 break;
1107 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1108 if (GET_CODE (op) == FLOAT_EXTEND
1109 && GET_MODE (XEXP (op, 0)) == mode)
1110 return XEXP (op, 0);
1112 /* (float_truncate:SF (float_truncate:DF foo:XF))
1113 = (float_truncate:SF foo:XF).
1114 This may eliminate double rounding, so it is unsafe.
1116 (float_truncate:SF (float_extend:XF foo:DF))
1117 = (float_truncate:SF foo:DF).
1119 (float_truncate:DF (float_extend:XF foo:SF))
1120 = (float_extend:DF foo:SF). */
1121 if ((GET_CODE (op) == FLOAT_TRUNCATE
1122 && flag_unsafe_math_optimizations)
1123 || GET_CODE (op) == FLOAT_EXTEND)
1124 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1125 0)))
1126 > GET_MODE_SIZE (mode)
1127 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1128 mode,
1129 XEXP (op, 0), mode);
1131 /* (float_truncate (float x)) is (float x) */
1132 if (GET_CODE (op) == FLOAT
1133 && (flag_unsafe_math_optimizations
1134 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1135 && ((unsigned)significand_size (GET_MODE (op))
1136 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1137 - num_sign_bit_copies (XEXP (op, 0),
1138 GET_MODE (XEXP (op, 0))))))))
1139 return simplify_gen_unary (FLOAT, mode,
1140 XEXP (op, 0),
1141 GET_MODE (XEXP (op, 0)));
1143 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1144 (OP:SF foo:SF) if OP is NEG or ABS. */
1145 if ((GET_CODE (op) == ABS
1146 || GET_CODE (op) == NEG)
1147 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1149 return simplify_gen_unary (GET_CODE (op), mode,
1150 XEXP (XEXP (op, 0), 0), mode);
1152 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 is (float_truncate:SF x). */
1154 if (GET_CODE (op) == SUBREG
1155 && subreg_lowpart_p (op)
1156 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1157 return SUBREG_REG (op);
1158 break;
1160 case FLOAT_EXTEND:
1161 if (DECIMAL_FLOAT_MODE_P (mode))
1162 break;
1164 /* (float_extend (float_extend x)) is (float_extend x)
1166 (float_extend (float x)) is (float x) assuming that double
1167 rounding can't happen. */
1169 if (GET_CODE (op) == FLOAT_EXTEND
1170 || (GET_CODE (op) == FLOAT
1171 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1172 && ((unsigned)significand_size (GET_MODE (op))
1173 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1174 - num_sign_bit_copies (XEXP (op, 0),
1175 GET_MODE (XEXP (op, 0)))))))
1176 return simplify_gen_unary (GET_CODE (op), mode,
1177 XEXP (op, 0),
1178 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case ABS:
1183 /* (abs (neg <foo>)) -> (abs <foo>) */
1184 if (GET_CODE (op) == NEG)
1185 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1186 GET_MODE (XEXP (op, 0)));
1188 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1189 do nothing. */
1190 if (GET_MODE (op) == VOIDmode)
1191 break;
1193 /* If operand is something known to be positive, ignore the ABS. */
1194 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1195 || val_signbit_known_clear_p (GET_MODE (op),
1196 nonzero_bits (op, GET_MODE (op))))
1197 return op;
1199 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1200 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1201 return gen_rtx_NEG (mode, op);
1203 break;
1205 case FFS:
1206 /* (ffs (*_extend <X>)) = (ffs <X>) */
1207 if (GET_CODE (op) == SIGN_EXTEND
1208 || GET_CODE (op) == ZERO_EXTEND)
1209 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1210 GET_MODE (XEXP (op, 0)));
1211 break;
1213 case POPCOUNT:
1214 switch (GET_CODE (op))
1216 case BSWAP:
1217 case ZERO_EXTEND:
1218 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1219 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1222 case ROTATE:
1223 case ROTATERT:
1224 /* Rotations don't affect popcount. */
1225 if (!side_effects_p (XEXP (op, 1)))
1226 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1227 GET_MODE (XEXP (op, 0)));
1228 break;
1230 default:
1231 break;
1233 break;
1235 case PARITY:
1236 switch (GET_CODE (op))
1238 case NOT:
1239 case BSWAP:
1240 case ZERO_EXTEND:
1241 case SIGN_EXTEND:
1242 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1245 case ROTATE:
1246 case ROTATERT:
1247 /* Rotations don't affect parity. */
1248 if (!side_effects_p (XEXP (op, 1)))
1249 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1250 GET_MODE (XEXP (op, 0)));
1251 break;
1253 default:
1254 break;
1256 break;
1258 case BSWAP:
1259 /* (bswap (bswap x)) -> x. */
1260 if (GET_CODE (op) == BSWAP)
1261 return XEXP (op, 0);
1262 break;
1264 case FLOAT:
1265 /* (float (sign_extend <X>)) = (float <X>). */
1266 if (GET_CODE (op) == SIGN_EXTEND)
1267 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1269 break;
1271 case SIGN_EXTEND:
1272 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1273 becomes just the MINUS if its mode is MODE. This allows
1274 folding switch statements on machines using casesi (such as
1275 the VAX). */
1276 if (GET_CODE (op) == TRUNCATE
1277 && GET_MODE (XEXP (op, 0)) == mode
1278 && GET_CODE (XEXP (op, 0)) == MINUS
1279 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1280 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1281 return XEXP (op, 0);
1283 /* Extending a widening multiplication should be canonicalized to
1284 a wider widening multiplication. */
1285 if (GET_CODE (op) == MULT)
1287 rtx lhs = XEXP (op, 0);
1288 rtx rhs = XEXP (op, 1);
1289 enum rtx_code lcode = GET_CODE (lhs);
1290 enum rtx_code rcode = GET_CODE (rhs);
1292 /* Widening multiplies usually extend both operands, but sometimes
1293 they use a shift to extract a portion of a register. */
1294 if ((lcode == SIGN_EXTEND
1295 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1296 && (rcode == SIGN_EXTEND
1297 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1299 enum machine_mode lmode = GET_MODE (lhs);
1300 enum machine_mode rmode = GET_MODE (rhs);
1301 int bits;
1303 if (lcode == ASHIFTRT)
1304 /* Number of bits not shifted off the end. */
1305 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1306 else /* lcode == SIGN_EXTEND */
1307 /* Size of inner mode. */
1308 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1310 if (rcode == ASHIFTRT)
1311 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1312 else /* rcode == SIGN_EXTEND */
1313 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1315 /* We can only widen multiplies if the result is mathematically
1316 equivalent. I.e. if overflow was impossible. */
1317 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1318 return simplify_gen_binary
1319 (MULT, mode,
1320 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1321 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
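/* For example (illustration only):
   (sign_extend:DI (mult:SI (sign_extend:SI x:HI) (sign_extend:SI y:HI)))
   has 16 + 16 = 32 significant bits, which fits the 32-bit product mode,
   so it is rewritten as (mult:DI (sign_extend:DI x:HI)
   (sign_extend:DI y:HI)), a wider widening multiply.  */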
1325 /* Check for a sign extension of a subreg of a promoted
1326 variable, where the promotion is sign-extended, and the
1327 target mode is the same as the variable's promotion. */
1328 if (GET_CODE (op) == SUBREG
1329 && SUBREG_PROMOTED_VAR_P (op)
1330 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1331 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1333 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1334 if (temp)
1335 return temp;
1338 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1339 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1340 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1342 gcc_assert (GET_MODE_BITSIZE (mode)
1343 > GET_MODE_BITSIZE (GET_MODE (op)));
1344 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1345 GET_MODE (XEXP (op, 0)));
1348 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1349 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1350 GET_MODE_BITSIZE (N) - I bits.
1351 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1352 is similarly (zero_extend:M (subreg:O <X>)). */
1353 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1354 && GET_CODE (XEXP (op, 0)) == ASHIFT
1355 && CONST_INT_P (XEXP (op, 1))
1356 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1357 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1359 enum machine_mode tmode
1360 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1361 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1362 gcc_assert (GET_MODE_BITSIZE (mode)
1363 > GET_MODE_BITSIZE (GET_MODE (op)));
1364 if (tmode != BLKmode)
1366 rtx inner =
1367 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1368 if (inner)
1369 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1370 ? SIGN_EXTEND : ZERO_EXTEND,
1371 mode, inner, tmode);
1375 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1376 /* As we do not know which address space the pointer is referring to,
1377 we can do this only if the target does not support different pointer
1378 or address modes depending on the address space. */
1379 if (target_default_pointer_address_modes_p ()
1380 && ! POINTERS_EXTEND_UNSIGNED
1381 && mode == Pmode && GET_MODE (op) == ptr_mode
1382 && (CONSTANT_P (op)
1383 || (GET_CODE (op) == SUBREG
1384 && REG_P (SUBREG_REG (op))
1385 && REG_POINTER (SUBREG_REG (op))
1386 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1387 return convert_memory_address (Pmode, op);
1388 #endif
1389 break;
1391 case ZERO_EXTEND:
1392 /* Check for a zero extension of a subreg of a promoted
1393 variable, where the promotion is zero-extended, and the
1394 target mode is the same as the variable's promotion. */
1395 if (GET_CODE (op) == SUBREG
1396 && SUBREG_PROMOTED_VAR_P (op)
1397 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1398 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1400 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1401 if (temp)
1402 return temp;
1405 /* Extending a widening multiplication should be canonicalized to
1406 a wider widening multiplication. */
1407 if (GET_CODE (op) == MULT)
1409 rtx lhs = XEXP (op, 0);
1410 rtx rhs = XEXP (op, 1);
1411 enum rtx_code lcode = GET_CODE (lhs);
1412 enum rtx_code rcode = GET_CODE (rhs);
1414 /* Widening multiplies usually extend both operands, but sometimes
1415 they use a shift to extract a portion of a register. */
1416 if ((lcode == ZERO_EXTEND
1417 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1418 && (rcode == ZERO_EXTEND
1419 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1421 enum machine_mode lmode = GET_MODE (lhs);
1422 enum machine_mode rmode = GET_MODE (rhs);
1423 int bits;
1425 if (lcode == LSHIFTRT)
1426 /* Number of bits not shifted off the end. */
1427 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1428 else /* lcode == ZERO_EXTEND */
1429 /* Size of inner mode. */
1430 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1432 if (rcode == LSHIFTRT)
1433 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1434 else /* rcode == ZERO_EXTEND */
1435 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1437 /* We can only widen multiplies if the result is mathematically
1438 equivalent. I.e. if overflow was impossible. */
1439 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1440 return simplify_gen_binary
1441 (MULT, mode,
1442 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1443 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1447 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1448 if (GET_CODE (op) == ZERO_EXTEND)
1449 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1450 GET_MODE (XEXP (op, 0)));
1452 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1453 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1454 GET_MODE_BITSIZE (N) - I bits. */
1455 if (GET_CODE (op) == LSHIFTRT
1456 && GET_CODE (XEXP (op, 0)) == ASHIFT
1457 && CONST_INT_P (XEXP (op, 1))
1458 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1459 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1461 enum machine_mode tmode
1462 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1463 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1464 if (tmode != BLKmode)
1466 rtx inner =
1467 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1468 if (inner)
1469 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1473 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1474 /* As we do not know which address space the pointer is referring to,
1475 we can do this only if the target does not support different pointer
1476 or address modes depending on the address space. */
1477 if (target_default_pointer_address_modes_p ()
1478 && POINTERS_EXTEND_UNSIGNED > 0
1479 && mode == Pmode && GET_MODE (op) == ptr_mode
1480 && (CONSTANT_P (op)
1481 || (GET_CODE (op) == SUBREG
1482 && REG_P (SUBREG_REG (op))
1483 && REG_POINTER (SUBREG_REG (op))
1484 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1485 return convert_memory_address (Pmode, op);
1486 #endif
1487 break;
1489 default:
1490 break;
1493 return 0;
1496 /* Try to compute the value of a unary operation CODE whose output mode is to
1497 be MODE with input operand OP whose mode was originally OP_MODE.
1498 Return zero if the value cannot be computed. */
1500 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1501 rtx op, enum machine_mode op_mode)
1503 unsigned int width = GET_MODE_PRECISION (mode);
1504 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1506 if (code == VEC_DUPLICATE)
1508 gcc_assert (VECTOR_MODE_P (mode));
1509 if (GET_MODE (op) != VOIDmode)
1511 if (!VECTOR_MODE_P (GET_MODE (op)))
1512 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1513 else
1514 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1515 (GET_MODE (op)));
1517 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1518 || GET_CODE (op) == CONST_VECTOR)
1520 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1521 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1522 rtvec v = rtvec_alloc (n_elts);
1523 unsigned int i;
1525 if (GET_CODE (op) != CONST_VECTOR)
1526 for (i = 0; i < n_elts; i++)
1527 RTVEC_ELT (v, i) = op;
1528 else
1530 enum machine_mode inmode = GET_MODE (op);
1531 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1532 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1534 gcc_assert (in_n_elts < n_elts);
1535 gcc_assert ((n_elts % in_n_elts) == 0);
1536 for (i = 0; i < n_elts; i++)
1537 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1539 return gen_rtx_CONST_VECTOR (mode, v);
1543 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1545 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1546 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1547 enum machine_mode opmode = GET_MODE (op);
1548 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1549 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1550 rtvec v = rtvec_alloc (n_elts);
1551 unsigned int i;
1553 gcc_assert (op_n_elts == n_elts);
1554 for (i = 0; i < n_elts; i++)
1556 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1557 CONST_VECTOR_ELT (op, i),
1558 GET_MODE_INNER (opmode));
1559 if (!x)
1560 return 0;
1561 RTVEC_ELT (v, i) = x;
1563 return gen_rtx_CONST_VECTOR (mode, v);
1566 /* The order of these tests is critical so that, for example, we don't
1567 check the wrong mode (input vs. output) for a conversion operation,
1568 such as FIX. At some point, this should be simplified. */
1570 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1572 HOST_WIDE_INT hv, lv;
1573 REAL_VALUE_TYPE d;
1575 if (CONST_INT_P (op))
1576 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1577 else
1578 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1580 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1581 d = real_value_truncate (mode, d);
1582 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1584 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1586 HOST_WIDE_INT hv, lv;
1587 REAL_VALUE_TYPE d;
1589 if (CONST_INT_P (op))
1590 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1591 else
1592 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1594 if (op_mode == VOIDmode
1595 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1596 /* We should never get a negative number. */
1597 gcc_assert (hv >= 0);
1598 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1599 hv = 0, lv &= GET_MODE_MASK (op_mode);
1601 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1602 d = real_value_truncate (mode, d);
1603 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1606 if (CONST_INT_P (op)
1607 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1609 HOST_WIDE_INT arg0 = INTVAL (op);
1610 HOST_WIDE_INT val;
1612 switch (code)
1614 case NOT:
1615 val = ~ arg0;
1616 break;
1618 case NEG:
1619 val = - arg0;
1620 break;
1622 case ABS:
1623 val = (arg0 >= 0 ? arg0 : - arg0);
1624 break;
1626 case FFS:
1627 arg0 &= GET_MODE_MASK (mode);
1628 val = ffs_hwi (arg0);
1629 break;
1631 case CLZ:
1632 arg0 &= GET_MODE_MASK (mode);
1633 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1635 else
1636 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1637 break;
1639 case CLRSB:
1640 arg0 &= GET_MODE_MASK (mode);
1641 if (arg0 == 0)
1642 val = GET_MODE_PRECISION (mode) - 1;
1643 else if (arg0 >= 0)
1644 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1645 else if (arg0 < 0)
1646 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1647 break;
1649 case CTZ:
1650 arg0 &= GET_MODE_MASK (mode);
1651 if (arg0 == 0)
1653 /* Even if the value at zero is undefined, we have to come
1654 up with some replacement. Seems good enough. */
1655 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1656 val = GET_MODE_PRECISION (mode);
1658 else
1659 val = ctz_hwi (arg0);
1660 break;
1662 case POPCOUNT:
1663 arg0 &= GET_MODE_MASK (mode);
1664 val = 0;
1665 while (arg0)
1666 val++, arg0 &= arg0 - 1;
1667 break;
1669 case PARITY:
1670 arg0 &= GET_MODE_MASK (mode);
1671 val = 0;
1672 while (arg0)
1673 val++, arg0 &= arg0 - 1;
1674 val &= 1;
1675 break;
1677 case BSWAP:
1679 unsigned int s;
1681 val = 0;
1682 for (s = 0; s < width; s += 8)
1684 unsigned int d = width - s - 8;
1685 unsigned HOST_WIDE_INT byte;
1686 byte = (arg0 >> s) & 0xff;
1687 val |= byte << d;
1690 break;
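/* For example (illustration only): the byte loop above reverses byte
   order, so a 32-bit BSWAP of 0x12345678 yields 0x78563412.  */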
1692 case TRUNCATE:
1693 val = arg0;
1694 break;
1696 case ZERO_EXTEND:
1697 /* When zero-extending a CONST_INT, we need to know its
1698 original mode. */
1699 gcc_assert (op_mode != VOIDmode);
1700 if (op_width == HOST_BITS_PER_WIDE_INT)
1702 /* If we were really extending the mode,
1703 we would have to distinguish between zero-extension
1704 and sign-extension. */
1705 gcc_assert (width == op_width);
1706 val = arg0;
1708 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1709 val = arg0 & GET_MODE_MASK (op_mode);
1710 else
1711 return 0;
1712 break;
1714 case SIGN_EXTEND:
1715 if (op_mode == VOIDmode)
1716 op_mode = mode;
1717 op_width = GET_MODE_PRECISION (op_mode);
1718 if (op_width == HOST_BITS_PER_WIDE_INT)
1720 /* If we were really extending the mode,
1721 we would have to distinguish between zero-extension
1722 and sign-extension. */
1723 gcc_assert (width == op_width);
1724 val = arg0;
1726 else if (op_width < HOST_BITS_PER_WIDE_INT)
1728 val = arg0 & GET_MODE_MASK (op_mode);
1729 if (val_signbit_known_set_p (op_mode, val))
1730 val |= ~GET_MODE_MASK (op_mode);
1732 else
1733 return 0;
1734 break;
1736 case SQRT:
1737 case FLOAT_EXTEND:
1738 case FLOAT_TRUNCATE:
1739 case SS_TRUNCATE:
1740 case US_TRUNCATE:
1741 case SS_NEG:
1742 case US_NEG:
1743 case SS_ABS:
1744 return 0;
1746 default:
1747 gcc_unreachable ();
1750 return gen_int_mode (val, mode);
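/* For example (illustration only): the CONST_INT branch above folds
   NEG of (const_int 5) in SImode to (const_int -5) and POPCOUNT of
   (const_int 7) to (const_int 3).  */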
1753 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1754 for a DImode operation on a CONST_INT. */
1755 else if (width <= HOST_BITS_PER_DOUBLE_INT
1756 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1758 double_int first, value;
1760 if (CONST_DOUBLE_AS_INT_P (op))
1761 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1762 CONST_DOUBLE_LOW (op));
1763 else
1764 first = double_int::from_shwi (INTVAL (op));
1766 switch (code)
1768 case NOT:
1769 value = ~first;
1770 break;
1772 case NEG:
1773 value = -first;
1774 break;
1776 case ABS:
1777 if (first.is_negative ())
1778 value = -first;
1779 else
1780 value = first;
1781 break;
1783 case FFS:
1784 value.high = 0;
1785 if (first.low != 0)
1786 value.low = ffs_hwi (first.low);
1787 else if (first.high != 0)
1788 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1789 else
1790 value.low = 0;
1791 break;
1793 case CLZ:
1794 value.high = 0;
1795 if (first.high != 0)
1796 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1797 - HOST_BITS_PER_WIDE_INT;
1798 else if (first.low != 0)
1799 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1800 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1801 value.low = GET_MODE_PRECISION (mode);
1802 break;
1804 case CTZ:
1805 value.high = 0;
1806 if (first.low != 0)
1807 value.low = ctz_hwi (first.low);
1808 else if (first.high != 0)
1809 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1810 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1811 value.low = GET_MODE_PRECISION (mode);
1812 break;
1814 case POPCOUNT:
1815 value = double_int_zero;
1816 while (first.low)
1818 value.low++;
1819 first.low &= first.low - 1;
1821 while (first.high)
1823 value.low++;
1824 first.high &= first.high - 1;
1826 break;
1828 case PARITY:
1829 value = double_int_zero;
1830 while (first.low)
1832 value.low++;
1833 first.low &= first.low - 1;
1835 while (first.high)
1837 value.low++;
1838 first.high &= first.high - 1;
1840 value.low &= 1;
1841 break;
1843 case BSWAP:
1845 unsigned int s;
1847 value = double_int_zero;
1848 for (s = 0; s < width; s += 8)
1850 unsigned int d = width - s - 8;
1851 unsigned HOST_WIDE_INT byte;
1853 if (s < HOST_BITS_PER_WIDE_INT)
1854 byte = (first.low >> s) & 0xff;
1855 else
1856 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1858 if (d < HOST_BITS_PER_WIDE_INT)
1859 value.low |= byte << d;
1860 else
1861 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1864 break;
1866 case TRUNCATE:
1867 /* This is just a change-of-mode, so do nothing. */
1868 value = first;
1869 break;
1871 case ZERO_EXTEND:
1872 gcc_assert (op_mode != VOIDmode);
1874 if (op_width > HOST_BITS_PER_WIDE_INT)
1875 return 0;
1877 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1878 break;
1880 case SIGN_EXTEND:
1881 if (op_mode == VOIDmode
1882 || op_width > HOST_BITS_PER_WIDE_INT)
1883 return 0;
1884 else
1886 value.low = first.low & GET_MODE_MASK (op_mode);
1887 if (val_signbit_known_set_p (op_mode, value.low))
1888 value.low |= ~GET_MODE_MASK (op_mode);
1890 value.high = HWI_SIGN_EXTEND (value.low);
1892 break;
1894 case SQRT:
1895 return 0;
1897 default:
1898 return 0;
1901 return immed_double_int_const (value, mode);
1904 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1905 && SCALAR_FLOAT_MODE_P (mode)
1906 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1908 REAL_VALUE_TYPE d, t;
1909 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1911 switch (code)
1913 case SQRT:
1914 if (HONOR_SNANS (mode) && real_isnan (&d))
1915 return 0;
1916 real_sqrt (&t, mode, &d);
1917 d = t;
1918 break;
1919 case ABS:
1920 d = real_value_abs (&d);
1921 break;
1922 case NEG:
1923 d = real_value_negate (&d);
1924 break;
1925 case FLOAT_TRUNCATE:
1926 d = real_value_truncate (mode, d);
1927 break;
1928 case FLOAT_EXTEND:
1929 /* All this does is change the mode, unless the mode class
1930 changes; in that case really convert the value. */
1931 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1932 real_convert (&d, mode, &d);
1933 break;
1934 case FIX:
1935 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1936 break;
1937 case NOT:
1939 long tmp[4];
1940 int i;
1942 real_to_target (tmp, &d, GET_MODE (op));
1943 for (i = 0; i < 4; i++)
1944 tmp[i] = ~tmp[i];
1945 real_from_target (&d, tmp, mode);
1946 break;
1948 default:
1949 gcc_unreachable ();
1951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1954 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1955 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1956 && GET_MODE_CLASS (mode) == MODE_INT
1957 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1959 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1960 operators are intentionally left unspecified (to ease implementation
1961 by target backends), for consistency, this routine implements the
1962 same semantics for constant folding as used by the middle-end. */
1964 /* This was formerly used only for non-IEEE float.
1965 eggert@twinsun.com says it is safe for IEEE also. */
1966 HOST_WIDE_INT xh, xl, th, tl;
1967 REAL_VALUE_TYPE x, t;
1968 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1969 switch (code)
1971 case FIX:
1972 if (REAL_VALUE_ISNAN (x))
1973 return const0_rtx;
1975 /* Test against the signed upper bound. */
1976 if (width > HOST_BITS_PER_WIDE_INT)
1978 th = ((unsigned HOST_WIDE_INT) 1
1979 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1980 tl = -1;
1982 else
1984 th = 0;
1985 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1987 real_from_integer (&t, VOIDmode, tl, th, 0);
1988 if (REAL_VALUES_LESS (t, x))
1990 xh = th;
1991 xl = tl;
1992 break;
1995 /* Test against the signed lower bound. */
1996 if (width > HOST_BITS_PER_WIDE_INT)
1998 th = (unsigned HOST_WIDE_INT) (-1)
1999 << (width - HOST_BITS_PER_WIDE_INT - 1);
2000 tl = 0;
2002 else
2004 th = -1;
2005 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2007 real_from_integer (&t, VOIDmode, tl, th, 0);
2008 if (REAL_VALUES_LESS (x, t))
2010 xh = th;
2011 xl = tl;
2012 break;
2014 REAL_VALUE_TO_INT (&xl, &xh, x);
2015 break;
2017 case UNSIGNED_FIX:
2018 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2019 return const0_rtx;
2021 /* Test against the unsigned upper bound. */
2022 if (width == HOST_BITS_PER_DOUBLE_INT)
2024 th = -1;
2025 tl = -1;
2027 else if (width >= HOST_BITS_PER_WIDE_INT)
2029 th = ((unsigned HOST_WIDE_INT) 1
2030 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2031 tl = -1;
2033 else
2035 th = 0;
2036 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2038 real_from_integer (&t, VOIDmode, tl, th, 1);
2039 if (REAL_VALUES_LESS (t, x))
2041 xh = th;
2042 xl = tl;
2043 break;
2046 REAL_VALUE_TO_INT (&xl, &xh, x);
2047 break;
2049 default:
2050 gcc_unreachable ();
2052 return immed_double_const (xl, xh, mode);
2055 return NULL_RTX;
2058 /* Subroutine of simplify_binary_operation to simplify a binary operation
2059 CODE that can commute with byte swapping, with result mode MODE and
2060 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2061 Return zero if no simplification or canonicalization is possible. */
2063 static rtx
2064 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2065 rtx op0, rtx op1)
2067 rtx tem;
2069 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 with its bytes swapped. */
2070 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2072 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2073 simplify_gen_unary (BSWAP, mode, op1, mode));
2074 return simplify_gen_unary (BSWAP, mode, tem, mode);
2077 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2078 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2080 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2081 return simplify_gen_unary (BSWAP, mode, tem, mode);
2084 return NULL_RTX;
2087 /* Subroutine of simplify_binary_operation to simplify a commutative,
2088 associative binary operation CODE with result mode MODE, operating
2089 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2090 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2091 canonicalization is possible. */
2093 static rtx
2094 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2095 rtx op0, rtx op1)
2097 rtx tem;
2099 /* Linearize the operator to the left. */
2100 if (GET_CODE (op1) == code)
2102 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2103 if (GET_CODE (op0) == code)
2105 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2106 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2109 /* "a op (b op c)" becomes "(b op c) op a". */
2110 if (! swap_commutative_operands_p (op1, op0))
2111 return simplify_gen_binary (code, mode, op1, op0);
2113 tem = op0;
2114 op0 = op1;
2115 op1 = tem;
2118 if (GET_CODE (op0) == code)
2120 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2121 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2123 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2124 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2127 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2128 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2129 if (tem != 0)
2130 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2132 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2133 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2134 if (tem != 0)
2135 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2138 return 0;
2142 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2143 and OP1. Return 0 if no simplification is possible.
2145 Don't use this for relational operations such as EQ or LT.
2146 Use simplify_relational_operation instead. */
2147 rtx
2148 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2149 rtx op0, rtx op1)
2151 rtx trueop0, trueop1;
2152 rtx tem;
2154 /* Relational operations don't work here. We must know the mode
2155 of the operands in order to do the comparison correctly.
2156 Assuming a full word can give incorrect results.
2157 Consider comparing 128 with -128 in QImode. */
2158 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2159 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2161 /* Make sure the constant is second. */
2162 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2163 && swap_commutative_operands_p (op0, op1))
2165 tem = op0, op0 = op1, op1 = tem;
2168 trueop0 = avoid_constant_pool_reference (op0);
2169 trueop1 = avoid_constant_pool_reference (op1);
2171 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2172 if (tem)
2173 return tem;
2174 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2177 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2178 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2179 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2180 actual constants. */
2182 static rtx
2183 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2184 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2186 rtx tem, reversed, opleft, opright;
2187 HOST_WIDE_INT val;
2188 unsigned int width = GET_MODE_PRECISION (mode);
2190 /* Even if we can't compute a constant result,
2191 there are some cases worth simplifying. */
2193 switch (code)
2195 case PLUS:
2196 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2197 when x is NaN, infinite, or finite and nonzero. They aren't
2198 when x is -0 and the rounding mode is not towards -infinity,
2199 since (-0) + 0 is then 0. */
2200 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2201 return op0;
2203 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2204 transformations are safe even for IEEE. */
2205 if (GET_CODE (op0) == NEG)
2206 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2207 else if (GET_CODE (op1) == NEG)
2208 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2210 /* (~a) + 1 -> -a */
2211 if (INTEGRAL_MODE_P (mode)
2212 && GET_CODE (op0) == NOT
2213 && trueop1 == const1_rtx)
2214 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2216 /* Handle both-operands-constant cases. We can only add
2217 CONST_INTs to constants since the sum of relocatable symbols
2218 can't be handled by most assemblers. Don't add CONST_INT
2219 to CONST_INT since overflow won't be computed properly if wider
2220 than HOST_BITS_PER_WIDE_INT. */
2222 if ((GET_CODE (op0) == CONST
2223 || GET_CODE (op0) == SYMBOL_REF
2224 || GET_CODE (op0) == LABEL_REF)
2225 && CONST_INT_P (op1))
2226 return plus_constant (mode, op0, INTVAL (op1));
2227 else if ((GET_CODE (op1) == CONST
2228 || GET_CODE (op1) == SYMBOL_REF
2229 || GET_CODE (op1) == LABEL_REF)
2230 && CONST_INT_P (op0))
2231 return plus_constant (mode, op1, INTVAL (op0));
2233 /* See if this is something like X * C - X or vice versa or
2234 if the multiplication is written as a shift. If so, we can
2235 distribute and make a new multiply, shift, or maybe just
2236 have X (if C is 2 in the example above). But don't make
2237 something more expensive than we had before. */
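/* For example, (plus (mult x 3) x) can become (mult x 4), and
   (plus (ashift x 2) x) can become (mult x 5), when the result is
   no more costly than the original.  */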
2239 if (SCALAR_INT_MODE_P (mode))
2241 double_int coeff0, coeff1;
2242 rtx lhs = op0, rhs = op1;
2244 coeff0 = double_int_one;
2245 coeff1 = double_int_one;
2247 if (GET_CODE (lhs) == NEG)
2249 coeff0 = double_int_minus_one;
2250 lhs = XEXP (lhs, 0);
2252 else if (GET_CODE (lhs) == MULT
2253 && CONST_INT_P (XEXP (lhs, 1)))
2255 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2256 lhs = XEXP (lhs, 0);
2258 else if (GET_CODE (lhs) == ASHIFT
2259 && CONST_INT_P (XEXP (lhs, 1))
2260 && INTVAL (XEXP (lhs, 1)) >= 0
2261 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2263 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2264 lhs = XEXP (lhs, 0);
2267 if (GET_CODE (rhs) == NEG)
2269 coeff1 = double_int_minus_one;
2270 rhs = XEXP (rhs, 0);
2272 else if (GET_CODE (rhs) == MULT
2273 && CONST_INT_P (XEXP (rhs, 1)))
2275 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2276 rhs = XEXP (rhs, 0);
2278 else if (GET_CODE (rhs) == ASHIFT
2279 && CONST_INT_P (XEXP (rhs, 1))
2280 && INTVAL (XEXP (rhs, 1)) >= 0
2281 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2283 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2284 rhs = XEXP (rhs, 0);
2287 if (rtx_equal_p (lhs, rhs))
2289 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2290 rtx coeff;
2291 double_int val;
2292 bool speed = optimize_function_for_speed_p (cfun);
2294 val = coeff0 + coeff1;
2295 coeff = immed_double_int_const (val, mode);
2297 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2298 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2299 ? tem : 0;
2303 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2304 if (CONST_SCALAR_INT_P (op1)
2305 && GET_CODE (op0) == XOR
2306 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2307 && mode_signbit_p (mode, op1))
2308 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2309 simplify_gen_binary (XOR, mode, op1,
2310 XEXP (op0, 1)));
2312 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2313 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2314 && GET_CODE (op0) == MULT
2315 && GET_CODE (XEXP (op0, 0)) == NEG)
2317 rtx in1, in2;
2319 in1 = XEXP (XEXP (op0, 0), 0);
2320 in2 = XEXP (op0, 1);
2321 return simplify_gen_binary (MINUS, mode, op1,
2322 simplify_gen_binary (MULT, mode,
2323 in1, in2));
2326 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2327 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2328 is 1. */
2329 if (COMPARISON_P (op0)
2330 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2331 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2332 && (reversed = reversed_comparison (op0, mode)))
2333 return
2334 simplify_gen_unary (NEG, mode, reversed, mode);
2336 /* If one of the operands is a PLUS or a MINUS, see if we can
2337 simplify this by the associative law.
2338 Don't use the associative law for floating point.
2339 The inaccuracy makes it nonassociative,
2340 and subtle programs can break if operations are associated. */
2342 if (INTEGRAL_MODE_P (mode)
2343 && (plus_minus_operand_p (op0)
2344 || plus_minus_operand_p (op1))
2345 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2346 return tem;
2348 /* Reassociate floating point addition only when the user
2349 specifies associative math operations. */
2350 if (FLOAT_MODE_P (mode)
2351 && flag_associative_math)
2353 tem = simplify_associative_operation (code, mode, op0, op1);
2354 if (tem)
2355 return tem;
2357 break;
2359 case COMPARE:
2360 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2361 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2362 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2363 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2365 rtx xop00 = XEXP (op0, 0);
2366 rtx xop10 = XEXP (op1, 0);
2368 #ifdef HAVE_cc0
2369 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2370 #else
2371 if (REG_P (xop00) && REG_P (xop10)
2372 && GET_MODE (xop00) == GET_MODE (xop10)
2373 && REGNO (xop00) == REGNO (xop10)
2374 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2375 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2376 #endif
2377 return xop00;
2379 break;
2381 case MINUS:
2382 /* We can't assume x-x is 0 even with non-IEEE floating point,
2383 but since it is zero except in very strange circumstances, we
2384 will treat it as zero with -ffinite-math-only. */
2385 if (rtx_equal_p (trueop0, trueop1)
2386 && ! side_effects_p (op0)
2387 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2388 return CONST0_RTX (mode);
2390 /* Change subtraction from zero into negation. (0 - x) is the
2391 same as -x when x is NaN, infinite, or finite and nonzero.
2392 But if the mode has signed zeros, and does not round towards
2393 -infinity, then 0 - 0 is 0, not -0. */
2394 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2395 return simplify_gen_unary (NEG, mode, op1, mode);
2397 /* (-1 - a) is ~a. */
2398 if (trueop0 == constm1_rtx)
2399 return simplify_gen_unary (NOT, mode, op1, mode);
2401 /* Subtracting 0 has no effect unless the mode has signed zeros
2402 and supports rounding towards -infinity. In such a case,
2403 0 - 0 is -0. */
2404 if (!(HONOR_SIGNED_ZEROS (mode)
2405 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2406 && trueop1 == CONST0_RTX (mode))
2407 return op0;
2409 /* See if this is something like X * C - X or vice versa or
2410 if the multiplication is written as a shift. If so, we can
2411 distribute and make a new multiply, shift, or maybe just
2412 have X (if C is 2 in the example above). But don't make
2413 something more expensive than we had before. */
2415 if (SCALAR_INT_MODE_P (mode))
2417 double_int coeff0, negcoeff1;
2418 rtx lhs = op0, rhs = op1;
2420 coeff0 = double_int_one;
2421 negcoeff1 = double_int_minus_one;
2423 if (GET_CODE (lhs) == NEG)
2425 coeff0 = double_int_minus_one;
2426 lhs = XEXP (lhs, 0);
2428 else if (GET_CODE (lhs) == MULT
2429 && CONST_INT_P (XEXP (lhs, 1)))
2431 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2432 lhs = XEXP (lhs, 0);
2434 else if (GET_CODE (lhs) == ASHIFT
2435 && CONST_INT_P (XEXP (lhs, 1))
2436 && INTVAL (XEXP (lhs, 1)) >= 0
2437 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2439 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2440 lhs = XEXP (lhs, 0);
2443 if (GET_CODE (rhs) == NEG)
2445 negcoeff1 = double_int_one;
2446 rhs = XEXP (rhs, 0);
2448 else if (GET_CODE (rhs) == MULT
2449 && CONST_INT_P (XEXP (rhs, 1)))
2451 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2452 rhs = XEXP (rhs, 0);
2454 else if (GET_CODE (rhs) == ASHIFT
2455 && CONST_INT_P (XEXP (rhs, 1))
2456 && INTVAL (XEXP (rhs, 1)) >= 0
2457 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2459 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2460 negcoeff1 = -negcoeff1;
2461 rhs = XEXP (rhs, 0);
2464 if (rtx_equal_p (lhs, rhs))
2466 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2467 rtx coeff;
2468 double_int val;
2469 bool speed = optimize_function_for_speed_p (cfun);
2471 val = coeff0 + negcoeff1;
2472 coeff = immed_double_int_const (val, mode);
2474 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2475 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2476 ? tem : 0;
2480 /* (a - (-b)) -> (a + b). True even for IEEE. */
2481 if (GET_CODE (op1) == NEG)
2482 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2484 /* (-x - c) may be simplified as (-c - x). */
2485 if (GET_CODE (op0) == NEG
2486 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2488 tem = simplify_unary_operation (NEG, mode, op1, mode);
2489 if (tem)
2490 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2493 /* Don't let a relocatable value get a negative coeff. */
2494 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2495 return simplify_gen_binary (PLUS, mode,
2496 op0,
2497 neg_const_int (mode, op1));
2499 /* (x - (x & y)) -> (x & ~y) */
2500 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2502 if (rtx_equal_p (op0, XEXP (op1, 0)))
2504 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2505 GET_MODE (XEXP (op1, 1)));
2506 return simplify_gen_binary (AND, mode, op0, tem);
2508 if (rtx_equal_p (op0, XEXP (op1, 1)))
2510 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2511 GET_MODE (XEXP (op1, 0)));
2512 return simplify_gen_binary (AND, mode, op0, tem);
2516 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2517 by reversing the comparison code if valid. */
2518 if (STORE_FLAG_VALUE == 1
2519 && trueop0 == const1_rtx
2520 && COMPARISON_P (op1)
2521 && (reversed = reversed_comparison (op1, mode)))
2522 return reversed;
2524 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2525 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2526 && GET_CODE (op1) == MULT
2527 && GET_CODE (XEXP (op1, 0)) == NEG)
2529 rtx in1, in2;
2531 in1 = XEXP (XEXP (op1, 0), 0);
2532 in2 = XEXP (op1, 1);
2533 return simplify_gen_binary (PLUS, mode,
2534 simplify_gen_binary (MULT, mode,
2535 in1, in2),
2536 op0);
2539 /* Canonicalize (minus (neg A) (mult B C)) to
2540 (minus (mult (neg B) C) A). */
2541 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2542 && GET_CODE (op1) == MULT
2543 && GET_CODE (op0) == NEG)
2545 rtx in1, in2;
2547 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2548 in2 = XEXP (op1, 1);
2549 return simplify_gen_binary (MINUS, mode,
2550 simplify_gen_binary (MULT, mode,
2551 in1, in2),
2552 XEXP (op0, 0));
2555 /* If one of the operands is a PLUS or a MINUS, see if we can
2556 simplify this by the associative law. This will, for example,
2557 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2558 Don't use the associative law for floating point.
2559 The inaccuracy makes it nonassociative,
2560 and subtle programs can break if operations are associated. */
2562 if (INTEGRAL_MODE_P (mode)
2563 && (plus_minus_operand_p (op0)
2564 || plus_minus_operand_p (op1))
2565 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2566 return tem;
2567 break;
2569 case MULT:
2570 if (trueop1 == constm1_rtx)
2571 return simplify_gen_unary (NEG, mode, op0, mode);
2573 if (GET_CODE (op0) == NEG)
2575 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2576 /* If op1 is a MULT as well and simplify_unary_operation
2577 just moved the NEG to the second operand, simplify_gen_binary
2578 below could, through simplify_associative_operation, move
2579 the NEG around again and recurse endlessly. */
2580 if (temp
2581 && GET_CODE (op1) == MULT
2582 && GET_CODE (temp) == MULT
2583 && XEXP (op1, 0) == XEXP (temp, 0)
2584 && GET_CODE (XEXP (temp, 1)) == NEG
2585 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2586 temp = NULL_RTX;
2587 if (temp)
2588 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2590 if (GET_CODE (op1) == NEG)
2592 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2593 /* If op0 is a MULT as well and simplify_unary_operation
2594 just moved the NEG to the second operand, simplify_gen_binary
2595 below could, through simplify_associative_operation, move
2596 the NEG around again and recurse endlessly. */
2597 if (temp
2598 && GET_CODE (op0) == MULT
2599 && GET_CODE (temp) == MULT
2600 && XEXP (op0, 0) == XEXP (temp, 0)
2601 && GET_CODE (XEXP (temp, 1)) == NEG
2602 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2603 temp = NULL_RTX;
2604 if (temp)
2605 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2608 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2609 x is NaN, since x * 0 is then also NaN. Nor is it valid
2610 when the mode has signed zeros, since multiplying a negative
2611 number by 0 will give -0, not 0. */
2612 if (!HONOR_NANS (mode)
2613 && !HONOR_SIGNED_ZEROS (mode)
2614 && trueop1 == CONST0_RTX (mode)
2615 && ! side_effects_p (op0))
2616 return op1;
2618 /* In IEEE floating point, x*1 is not equivalent to x for
2619 signalling NaNs. */
2620 if (!HONOR_SNANS (mode)
2621 && trueop1 == CONST1_RTX (mode))
2622 return op0;
2624 /* Convert multiply by constant power of two into shift unless
2625 we are still generating RTL. This test is a kludge. */
2626 if (CONST_INT_P (trueop1)
2627 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2628 /* If the mode is larger than the host word size, and the
2629 uppermost bit is set, then this isn't a power of two due
2630 to implicit sign extension. */
2631 && (width <= HOST_BITS_PER_WIDE_INT
2632 || val != HOST_BITS_PER_WIDE_INT - 1))
2633 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2635 /* Likewise for multipliers wider than a word. */
2636 if (CONST_DOUBLE_AS_INT_P (trueop1)
2637 && GET_MODE (op0) == mode
2638 && CONST_DOUBLE_LOW (trueop1) == 0
2639 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2640 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2641 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2642 return simplify_gen_binary (ASHIFT, mode, op0,
2643 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2645 /* x*2 is x+x and x*(-1) is -x */
2646 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2647 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2648 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2649 && GET_MODE (op0) == mode)
2651 REAL_VALUE_TYPE d;
2652 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2654 if (REAL_VALUES_EQUAL (d, dconst2))
2655 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2657 if (!HONOR_SNANS (mode)
2658 && REAL_VALUES_EQUAL (d, dconstm1))
2659 return simplify_gen_unary (NEG, mode, op0, mode);
2662 /* Optimize -x * -x as x * x. */
2663 if (FLOAT_MODE_P (mode)
2664 && GET_CODE (op0) == NEG
2665 && GET_CODE (op1) == NEG
2666 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2667 && !side_effects_p (XEXP (op0, 0)))
2668 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2670 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2671 if (SCALAR_FLOAT_MODE_P (mode)
2672 && GET_CODE (op0) == ABS
2673 && GET_CODE (op1) == ABS
2674 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2675 && !side_effects_p (XEXP (op0, 0)))
2676 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2678 /* Reassociate multiplication, but for floating point MULTs
2679 only when the user specifies unsafe math optimizations. */
2680 if (! FLOAT_MODE_P (mode)
2681 || flag_unsafe_math_optimizations)
2683 tem = simplify_associative_operation (code, mode, op0, op1);
2684 if (tem)
2685 return tem;
2687 break;
2689 case IOR:
2690 if (trueop1 == CONST0_RTX (mode))
2691 return op0;
2692 if (INTEGRAL_MODE_P (mode)
2693 && trueop1 == CONSTM1_RTX (mode)
2694 && !side_effects_p (op0))
2695 return op1;
2696 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2697 return op0;
2698 /* A | (~A) -> -1 */
2699 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2700 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2701 && ! side_effects_p (op0)
2702 && SCALAR_INT_MODE_P (mode))
2703 return constm1_rtx;
2705 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2706 if (CONST_INT_P (op1)
2707 && HWI_COMPUTABLE_MODE_P (mode)
2708 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2709 && !side_effects_p (op0))
2710 return op1;
2712 /* Canonicalize (X & C1) | C2. */
2713 if (GET_CODE (op0) == AND
2714 && CONST_INT_P (trueop1)
2715 && CONST_INT_P (XEXP (op0, 1)))
2717 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2718 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2719 HOST_WIDE_INT c2 = INTVAL (trueop1);
2721 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2722 if ((c1 & c2) == c1
2723 && !side_effects_p (XEXP (op0, 0)))
2724 return trueop1;
2726 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2727 if (((c1|c2) & mask) == mask)
2728 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2730 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2731 if (((c1 & ~c2) & mask) != (c1 & mask))
2733 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2734 gen_int_mode (c1 & ~c2, mode));
2735 return simplify_gen_binary (IOR, mode, tem, op1);
2739 /* Convert (A & B) | A to A. */
2740 if (GET_CODE (op0) == AND
2741 && (rtx_equal_p (XEXP (op0, 0), op1)
2742 || rtx_equal_p (XEXP (op0, 1), op1))
2743 && ! side_effects_p (XEXP (op0, 0))
2744 && ! side_effects_p (XEXP (op0, 1)))
2745 return op1;
2747 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2748 mode size to (rotate A CX). */
2750 if (GET_CODE (op1) == ASHIFT
2751 || GET_CODE (op1) == SUBREG)
2753 opleft = op1;
2754 opright = op0;
2756 else
2758 opright = op1;
2759 opleft = op0;
2762 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2763 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2764 && CONST_INT_P (XEXP (opleft, 1))
2765 && CONST_INT_P (XEXP (opright, 1))
2766 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2767 == GET_MODE_PRECISION (mode)))
2768 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2770 /* Same, but for ashift that has been "simplified" to a wider mode
2771 by simplify_shift_const. */
2773 if (GET_CODE (opleft) == SUBREG
2774 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2775 && GET_CODE (opright) == LSHIFTRT
2776 && GET_CODE (XEXP (opright, 0)) == SUBREG
2777 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2778 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2779 && (GET_MODE_SIZE (GET_MODE (opleft))
2780 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2781 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2782 SUBREG_REG (XEXP (opright, 0)))
2783 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2784 && CONST_INT_P (XEXP (opright, 1))
2785 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2786 == GET_MODE_PRECISION (mode)))
2787 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2788 XEXP (SUBREG_REG (opleft), 1));
2790 /* If we have (ior (and X C1) C2), simplify this by making
2791 C1 as small as possible if C1 actually changes. */
2792 if (CONST_INT_P (op1)
2793 && (HWI_COMPUTABLE_MODE_P (mode)
2794 || INTVAL (op1) > 0)
2795 && GET_CODE (op0) == AND
2796 && CONST_INT_P (XEXP (op0, 1))
2797 && CONST_INT_P (op1)
2798 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2799 return simplify_gen_binary (IOR, mode,
2800 simplify_gen_binary
2801 (AND, mode, XEXP (op0, 0),
2802 GEN_INT (UINTVAL (XEXP (op0, 1))
2803 & ~UINTVAL (op1))),
2804 op1);
2806 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2807 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2808 the PLUS does not affect any of the bits in OP1: then we can do
2809 the IOR as a PLUS and we can associate. This is valid if OP1
2810 can be safely shifted left C bits. */
2811 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2812 && GET_CODE (XEXP (op0, 0)) == PLUS
2813 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2814 && CONST_INT_P (XEXP (op0, 1))
2815 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2817 int count = INTVAL (XEXP (op0, 1));
2818 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2820 if (mask >> count == INTVAL (trueop1)
2821 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2822 return simplify_gen_binary (ASHIFTRT, mode,
2823 plus_constant (mode, XEXP (op0, 0),
2824 mask),
2825 XEXP (op0, 1));
2828 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2829 if (tem)
2830 return tem;
2832 tem = simplify_associative_operation (code, mode, op0, op1);
2833 if (tem)
2834 return tem;
2835 break;
2837 case XOR:
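/* x ^ 0 is x, x ^ -1 is ~x, and x ^ x is 0 (except in MODE_CC).  */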
2838 if (trueop1 == CONST0_RTX (mode))
2839 return op0;
2840 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2841 return simplify_gen_unary (NOT, mode, op0, mode);
2842 if (rtx_equal_p (trueop0, trueop1)
2843 && ! side_effects_p (op0)
2844 && GET_MODE_CLASS (mode) != MODE_CC)
2845 return CONST0_RTX (mode);
2847 /* Canonicalize XOR of the most significant bit to PLUS. */
2848 if (CONST_SCALAR_INT_P (op1)
2849 && mode_signbit_p (mode, op1))
2850 return simplify_gen_binary (PLUS, mode, op0, op1);
2851 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2852 if (CONST_SCALAR_INT_P (op1)
2853 && GET_CODE (op0) == PLUS
2854 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2855 && mode_signbit_p (mode, XEXP (op0, 1)))
2856 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2857 simplify_gen_binary (XOR, mode, op1,
2858 XEXP (op0, 1)));
2860 /* If we are XORing two things that have no bits in common,
2861 convert them into an IOR. This helps to detect rotation encoded
2862 using those methods and possibly other simplifications. */
2864 if (HWI_COMPUTABLE_MODE_P (mode)
2865 && (nonzero_bits (op0, mode)
2866 & nonzero_bits (op1, mode)) == 0)
2867 return (simplify_gen_binary (IOR, mode, op0, op1));
2869 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2870 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2871 (NOT y). */
2873 int num_negated = 0;
2875 if (GET_CODE (op0) == NOT)
2876 num_negated++, op0 = XEXP (op0, 0);
2877 if (GET_CODE (op1) == NOT)
2878 num_negated++, op1 = XEXP (op1, 0);
2880 if (num_negated == 2)
2881 return simplify_gen_binary (XOR, mode, op0, op1);
2882 else if (num_negated == 1)
2883 return simplify_gen_unary (NOT, mode,
2884 simplify_gen_binary (XOR, mode, op0, op1),
2885 mode);
2888 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2889 correspond to a machine insn or result in further simplifications
2890 if B is a constant. */
2892 if (GET_CODE (op0) == AND
2893 && rtx_equal_p (XEXP (op0, 1), op1)
2894 && ! side_effects_p (op1))
2895 return simplify_gen_binary (AND, mode,
2896 simplify_gen_unary (NOT, mode,
2897 XEXP (op0, 0), mode),
2898 op1);
2900 else if (GET_CODE (op0) == AND
2901 && rtx_equal_p (XEXP (op0, 0), op1)
2902 && ! side_effects_p (op1))
2903 return simplify_gen_binary (AND, mode,
2904 simplify_gen_unary (NOT, mode,
2905 XEXP (op0, 1), mode),
2906 op1);
2908 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2909 we can transform like this:
2910 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2911 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2912 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2913 Attempt a few simplifications when B and C are both constants. */
2914 if (GET_CODE (op0) == AND
2915 && CONST_INT_P (op1)
2916 && CONST_INT_P (XEXP (op0, 1)))
2918 rtx a = XEXP (op0, 0);
2919 rtx b = XEXP (op0, 1);
2920 rtx c = op1;
2921 HOST_WIDE_INT bval = INTVAL (b);
2922 HOST_WIDE_INT cval = INTVAL (c);
2924 rtx na_c
2925 = simplify_binary_operation (AND, mode,
2926 simplify_gen_unary (NOT, mode, a, mode),
2927 c);
2928 if ((~cval & bval) == 0)
2930 /* Try to simplify ~A&C | ~B&C. */
2931 if (na_c != NULL_RTX)
2932 return simplify_gen_binary (IOR, mode, na_c,
2933 GEN_INT (~bval & cval));
2935 else
2937 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2938 if (na_c == const0_rtx)
2940 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2941 GEN_INT (~cval & bval));
2942 return simplify_gen_binary (IOR, mode, a_nc_b,
2943 GEN_INT (~bval & cval));
2948 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2949 comparison if STORE_FLAG_VALUE is 1. */
2950 if (STORE_FLAG_VALUE == 1
2951 && trueop1 == const1_rtx
2952 && COMPARISON_P (op0)
2953 && (reversed = reversed_comparison (op0, mode)))
2954 return reversed;
2956 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2957 is (lt foo (const_int 0)), so we can perform the above
2958 simplification if STORE_FLAG_VALUE is 1. */
2960 if (STORE_FLAG_VALUE == 1
2961 && trueop1 == const1_rtx
2962 && GET_CODE (op0) == LSHIFTRT
2963 && CONST_INT_P (XEXP (op0, 1))
2964 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2965 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2967 /* (xor (comparison foo bar) (const_int sign-bit))
2968 when STORE_FLAG_VALUE is the sign bit. */
2969 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2970 && trueop1 == const_true_rtx
2971 && COMPARISON_P (op0)
2972 && (reversed = reversed_comparison (op0, mode)))
2973 return reversed;
2975 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2976 if (tem)
2977 return tem;
2979 tem = simplify_associative_operation (code, mode, op0, op1);
2980 if (tem)
2981 return tem;
2982 break;
2984 case AND:
2985 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2986 return trueop1;
2987 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2988 return op0;
2989 if (HWI_COMPUTABLE_MODE_P (mode))
2991 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2992 HOST_WIDE_INT nzop1;
2993 if (CONST_INT_P (trueop1))
2995 HOST_WIDE_INT val1 = INTVAL (trueop1);
2996 /* If we are turning off bits already known off in OP0, we need
2997 not do an AND. */
2998 if ((nzop0 & ~val1) == 0)
2999 return op0;
3001 nzop1 = nonzero_bits (trueop1, mode);
3002 /* If we are clearing all the nonzero bits, the result is zero. */
3003 if ((nzop1 & nzop0) == 0
3004 && !side_effects_p (op0) && !side_effects_p (op1))
3005 return CONST0_RTX (mode);
3007 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3008 && GET_MODE_CLASS (mode) != MODE_CC)
3009 return op0;
3010 /* A & (~A) -> 0 */
3011 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3012 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3013 && ! side_effects_p (op0)
3014 && GET_MODE_CLASS (mode) != MODE_CC)
3015 return CONST0_RTX (mode);
3017 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3018 there are no nonzero bits of C outside of X's mode. */
3019 if ((GET_CODE (op0) == SIGN_EXTEND
3020 || GET_CODE (op0) == ZERO_EXTEND)
3021 && CONST_INT_P (trueop1)
3022 && HWI_COMPUTABLE_MODE_P (mode)
3023 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3024 & UINTVAL (trueop1)) == 0)
3026 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3027 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3028 gen_int_mode (INTVAL (trueop1),
3029 imode));
3030 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3033 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3034 we might be able to further simplify the AND with X and potentially
3035 remove the truncation altogether. */
3036 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3038 rtx x = XEXP (op0, 0);
3039 enum machine_mode xmode = GET_MODE (x);
3040 tem = simplify_gen_binary (AND, xmode, x,
3041 gen_int_mode (INTVAL (trueop1), xmode));
3042 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3045 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3046 if (GET_CODE (op0) == IOR
3047 && CONST_INT_P (trueop1)
3048 && CONST_INT_P (XEXP (op0, 1)))
3050 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3051 return simplify_gen_binary (IOR, mode,
3052 simplify_gen_binary (AND, mode,
3053 XEXP (op0, 0), op1),
3054 gen_int_mode (tmp, mode));
3057 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3058 insn (and may simplify more). */
3059 if (GET_CODE (op0) == XOR
3060 && rtx_equal_p (XEXP (op0, 0), op1)
3061 && ! side_effects_p (op1))
3062 return simplify_gen_binary (AND, mode,
3063 simplify_gen_unary (NOT, mode,
3064 XEXP (op0, 1), mode),
3065 op1);
3067 if (GET_CODE (op0) == XOR
3068 && rtx_equal_p (XEXP (op0, 1), op1)
3069 && ! side_effects_p (op1))
3070 return simplify_gen_binary (AND, mode,
3071 simplify_gen_unary (NOT, mode,
3072 XEXP (op0, 0), mode),
3073 op1);
3075 /* Similarly for (~(A ^ B)) & A. */
3076 if (GET_CODE (op0) == NOT
3077 && GET_CODE (XEXP (op0, 0)) == XOR
3078 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3079 && ! side_effects_p (op1))
3080 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3082 if (GET_CODE (op0) == NOT
3083 && GET_CODE (XEXP (op0, 0)) == XOR
3084 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3085 && ! side_effects_p (op1))
3086 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3088 /* Convert (A | B) & A to A. */
3089 if (GET_CODE (op0) == IOR
3090 && (rtx_equal_p (XEXP (op0, 0), op1)
3091 || rtx_equal_p (XEXP (op0, 1), op1))
3092 && ! side_effects_p (XEXP (op0, 0))
3093 && ! side_effects_p (XEXP (op0, 1)))
3094 return op1;
3096 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3097 ((A & N) + B) & M -> (A + B) & M
3098 Similarly if (N & M) == 0,
3099 ((A | N) + B) & M -> (A + B) & M
3100 and for - instead of + and/or ^ instead of |.
3101 Also, if (N & M) == 0, then
3102 (A +- N) & M -> A & M. */
3103 if (CONST_INT_P (trueop1)
3104 && HWI_COMPUTABLE_MODE_P (mode)
3105 && ~UINTVAL (trueop1)
3106 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3107 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3109 rtx pmop[2];
3110 int which;
3112 pmop[0] = XEXP (op0, 0);
3113 pmop[1] = XEXP (op0, 1);
3115 if (CONST_INT_P (pmop[1])
3116 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3117 return simplify_gen_binary (AND, mode, pmop[0], op1);
3119 for (which = 0; which < 2; which++)
3121 tem = pmop[which];
3122 switch (GET_CODE (tem))
3124 case AND:
3125 if (CONST_INT_P (XEXP (tem, 1))
3126 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3127 == UINTVAL (trueop1))
3128 pmop[which] = XEXP (tem, 0);
3129 break;
3130 case IOR:
3131 case XOR:
3132 if (CONST_INT_P (XEXP (tem, 1))
3133 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3134 pmop[which] = XEXP (tem, 0);
3135 break;
3136 default:
3137 break;
3141 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3143 tem = simplify_gen_binary (GET_CODE (op0), mode,
3144 pmop[0], pmop[1]);
3145 return simplify_gen_binary (code, mode, tem, op1);
3149 /* (and X (ior (not X) Y)) -> (and X Y) */
3150 if (GET_CODE (op1) == IOR
3151 && GET_CODE (XEXP (op1, 0)) == NOT
3152 && op0 == XEXP (XEXP (op1, 0), 0))
3153 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3155 /* (and (ior (not X) Y) X) -> (and X Y) */
3156 if (GET_CODE (op0) == IOR
3157 && GET_CODE (XEXP (op0, 0)) == NOT
3158 && op1 == XEXP (XEXP (op0, 0), 0))
3159 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3161 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3162 if (tem)
3163 return tem;
3165 tem = simplify_associative_operation (code, mode, op0, op1);
3166 if (tem)
3167 return tem;
3168 break;
3170 case UDIV:
3171 /* 0/x is 0 (or x&0 if x has side-effects). */
3172 if (trueop0 == CONST0_RTX (mode))
3174 if (side_effects_p (op1))
3175 return simplify_gen_binary (AND, mode, op1, trueop0);
3176 return trueop0;
3178 /* x/1 is x. */
3179 if (trueop1 == CONST1_RTX (mode))
3181 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3182 if (tem)
3183 return tem;
3185 /* Convert divide by power of two into shift. */
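/* For example, (udiv x 8) becomes (lshiftrt x 3).  */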
3186 if (CONST_INT_P (trueop1)
3187 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3188 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3189 break;
3191 case DIV:
3192 /* Handle floating point and integers separately. */
3193 if (SCALAR_FLOAT_MODE_P (mode))
3195 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3196 safe for modes with NaNs, since 0.0 / 0.0 will then be
3197 NaN rather than 0.0. Nor is it safe for modes with signed
3198 zeros, since dividing 0 by a negative number gives -0.0. */
3199 if (trueop0 == CONST0_RTX (mode)
3200 && !HONOR_NANS (mode)
3201 && !HONOR_SIGNED_ZEROS (mode)
3202 && ! side_effects_p (op1))
3203 return op0;
3204 /* x/1.0 is x. */
3205 if (trueop1 == CONST1_RTX (mode)
3206 && !HONOR_SNANS (mode))
3207 return op0;
3209 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3210 && trueop1 != CONST0_RTX (mode))
3212 REAL_VALUE_TYPE d;
3213 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3215 /* x/-1.0 is -x. */
3216 if (REAL_VALUES_EQUAL (d, dconstm1)
3217 && !HONOR_SNANS (mode))
3218 return simplify_gen_unary (NEG, mode, op0, mode);
3220 /* Change FP division by a constant into multiplication.
3221 Only do this with -freciprocal-math. */
3222 if (flag_reciprocal_math
3223 && !REAL_VALUES_EQUAL (d, dconst0))
3225 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3226 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3227 return simplify_gen_binary (MULT, mode, op0, tem);
3231 else if (SCALAR_INT_MODE_P (mode))
3233 /* 0/x is 0 (or x&0 if x has side-effects). */
3234 if (trueop0 == CONST0_RTX (mode)
3235 && !cfun->can_throw_non_call_exceptions)
3237 if (side_effects_p (op1))
3238 return simplify_gen_binary (AND, mode, op1, trueop0);
3239 return trueop0;
3241 /* x/1 is x. */
3242 if (trueop1 == CONST1_RTX (mode))
3244 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3245 if (tem)
3246 return tem;
3248 /* x/-1 is -x. */
3249 if (trueop1 == constm1_rtx)
3251 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3252 if (x)
3253 return simplify_gen_unary (NEG, mode, x, mode);
3256 break;
3258 case UMOD:
3259 /* 0%x is 0 (or x&0 if x has side-effects). */
3260 if (trueop0 == CONST0_RTX (mode))
3262 if (side_effects_p (op1))
3263 return simplify_gen_binary (AND, mode, op1, trueop0);
3264 return trueop0;
3266 /* x%1 is 0 (or x&0 if x has side-effects). */
3267 if (trueop1 == CONST1_RTX (mode))
3269 if (side_effects_p (op0))
3270 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3271 return CONST0_RTX (mode);
3273 /* Implement modulus by power of two as AND. */
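/* For example, (umod x 8) becomes (and x 7).  */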
3274 if (CONST_INT_P (trueop1)
3275 && exact_log2 (UINTVAL (trueop1)) > 0)
3276 return simplify_gen_binary (AND, mode, op0,
3277 GEN_INT (INTVAL (op1) - 1));
3278 break;
3280 case MOD:
3281 /* 0%x is 0 (or x&0 if x has side-effects). */
3282 if (trueop0 == CONST0_RTX (mode))
3284 if (side_effects_p (op1))
3285 return simplify_gen_binary (AND, mode, op1, trueop0);
3286 return trueop0;
3288 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3289 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3291 if (side_effects_p (op0))
3292 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3293 return CONST0_RTX (mode);
3295 break;
3297 case ROTATERT:
3298 case ROTATE:
3299 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3300 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3301 bitsize - 1, use the other rotate direction with an amount of
3302 1 .. bitsize / 2 - 1 instead. */
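/* For example, in SImode (rotate x 24) becomes (rotatert x 8).  */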
3303 if (CONST_INT_P (trueop1)
3304 && IN_RANGE (INTVAL (trueop1),
3305 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3306 GET_MODE_BITSIZE (mode) - 1))
3307 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3308 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3309 - INTVAL (trueop1)));
3310 /* FALLTHRU */
3311 case ASHIFTRT:
3312 if (trueop1 == CONST0_RTX (mode))
3313 return op0;
3314 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3315 return op0;
3316 /* Rotating ~0 always results in ~0. */
3317 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3318 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3319 && ! side_effects_p (op1))
3320 return op0;
3321 canonicalize_shift:
3322 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3324 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3325 if (val != INTVAL (op1))
3326 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3328 break;
3330 case ASHIFT:
3331 case SS_ASHIFT:
3332 case US_ASHIFT:
3333 if (trueop1 == CONST0_RTX (mode))
3334 return op0;
3335 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3336 return op0;
3337 goto canonicalize_shift;
3339 case LSHIFTRT:
3340 if (trueop1 == CONST0_RTX (mode))
3341 return op0;
3342 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3343 return op0;
3344 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3345 if (GET_CODE (op0) == CLZ
3346 && CONST_INT_P (trueop1)
3347 && STORE_FLAG_VALUE == 1
3348 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3350 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3351 unsigned HOST_WIDE_INT zero_val = 0;
3353 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3354 && zero_val == GET_MODE_PRECISION (imode)
3355 && INTVAL (trueop1) == exact_log2 (zero_val))
3356 return simplify_gen_relational (EQ, mode, imode,
3357 XEXP (op0, 0), const0_rtx);
3359 goto canonicalize_shift;
3361 case SMIN:
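/* (smin x C) is C when C is the most negative value of the mode;
   (smin x x) is x.  */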
3362 if (width <= HOST_BITS_PER_WIDE_INT
3363 && mode_signbit_p (mode, trueop1)
3364 && ! side_effects_p (op0))
3365 return op1;
3366 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3367 return op0;
3368 tem = simplify_associative_operation (code, mode, op0, op1);
3369 if (tem)
3370 return tem;
3371 break;
3373 case SMAX:
3374 if (width <= HOST_BITS_PER_WIDE_INT
3375 && CONST_INT_P (trueop1)
3376 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3377 && ! side_effects_p (op0))
3378 return op1;
3379 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3380 return op0;
3381 tem = simplify_associative_operation (code, mode, op0, op1);
3382 if (tem)
3383 return tem;
3384 break;
3386 case UMIN:
3387 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3388 return op1;
3389 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3390 return op0;
3391 tem = simplify_associative_operation (code, mode, op0, op1);
3392 if (tem)
3393 return tem;
3394 break;
3396 case UMAX:
3397 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3398 return op1;
3399 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3400 return op0;
3401 tem = simplify_associative_operation (code, mode, op0, op1);
3402 if (tem)
3403 return tem;
3404 break;
3406 case SS_PLUS:
3407 case US_PLUS:
3408 case SS_MINUS:
3409 case US_MINUS:
3410 case SS_MULT:
3411 case US_MULT:
3412 case SS_DIV:
3413 case US_DIV:
3414 /* ??? There are simplifications that can be done. */
3415 return 0;
3417 case VEC_SELECT:
3418 if (!VECTOR_MODE_P (mode))
3420 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3421 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3422 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3423 gcc_assert (XVECLEN (trueop1, 0) == 1);
3424 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3426 if (GET_CODE (trueop0) == CONST_VECTOR)
3427 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3428 (trueop1, 0, 0)));
3430 /* Extract a scalar element from a nested VEC_SELECT expression
3431 (with an optional nested VEC_CONCAT expression). Some targets
3432 (i386) extract a scalar element from a vector using a chain of
3433 nested VEC_SELECT expressions. When the input operand is a memory
3434 operand, this operation can be simplified to a simple scalar
3435 load from an offset memory address. */
3436 if (GET_CODE (trueop0) == VEC_SELECT)
3438 rtx op0 = XEXP (trueop0, 0);
3439 rtx op1 = XEXP (trueop0, 1);
3441 enum machine_mode opmode = GET_MODE (op0);
3442 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3443 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3445 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3446 int elem;
3448 rtvec vec;
3449 rtx tmp_op, tmp;
3451 gcc_assert (GET_CODE (op1) == PARALLEL);
3452 gcc_assert (i < n_elts);
3454 /* Select the element pointed to by the nested selector. */
3455 elem = INTVAL (XVECEXP (op1, 0, i));
3457 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3458 if (GET_CODE (op0) == VEC_CONCAT)
3460 rtx op00 = XEXP (op0, 0);
3461 rtx op01 = XEXP (op0, 1);
3463 enum machine_mode mode00, mode01;
3464 int n_elts00, n_elts01;
3466 mode00 = GET_MODE (op00);
3467 mode01 = GET_MODE (op01);
3469 /* Find out the number of elements in each operand. */
3470 if (VECTOR_MODE_P (mode00))
3472 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3473 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3475 else
3476 n_elts00 = 1;
3478 if (VECTOR_MODE_P (mode01))
3480 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3481 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3483 else
3484 n_elts01 = 1;
3486 gcc_assert (n_elts == n_elts00 + n_elts01);
3488 /* Select correct operand of VEC_CONCAT
3489 and adjust selector. */
3490 if (elem < n_elts01)
3491 tmp_op = op00;
3492 else
3494 tmp_op = op01;
3495 elem -= n_elts00;
3498 else
3499 tmp_op = op0;
3501 vec = rtvec_alloc (1);
3502 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3504 tmp = gen_rtx_fmt_ee (code, mode,
3505 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3506 return tmp;
3508 if (GET_CODE (trueop0) == VEC_DUPLICATE
3509 && GET_MODE (XEXP (trueop0, 0)) == mode)
3510 return XEXP (trueop0, 0);
3512 else
3514 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3515 gcc_assert (GET_MODE_INNER (mode)
3516 == GET_MODE_INNER (GET_MODE (trueop0)));
3517 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3519 if (GET_CODE (trueop0) == CONST_VECTOR)
3521 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3522 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3523 rtvec v = rtvec_alloc (n_elts);
3524 unsigned int i;
3526 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3527 for (i = 0; i < n_elts; i++)
3529 rtx x = XVECEXP (trueop1, 0, i);
3531 gcc_assert (CONST_INT_P (x));
3532 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3533 INTVAL (x));
3536 return gen_rtx_CONST_VECTOR (mode, v);
3539 /* Recognize the identity. */
3540 if (GET_MODE (trueop0) == mode)
3542 bool maybe_ident = true;
3543 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3545 rtx j = XVECEXP (trueop1, 0, i);
3546 if (!CONST_INT_P (j) || INTVAL (j) != i)
3548 maybe_ident = false;
3549 break;
3552 if (maybe_ident)
3553 return trueop0;
3556 /* If we build {a,b} then permute it, build the result directly. */
3557 if (XVECLEN (trueop1, 0) == 2
3558 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3559 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3560 && GET_CODE (trueop0) == VEC_CONCAT
3561 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3562 && GET_MODE (XEXP (trueop0, 0)) == mode
3563 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3564 && GET_MODE (XEXP (trueop0, 1)) == mode)
3566 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3567 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3568 rtx subop0, subop1;
3570 gcc_assert (i0 < 4 && i1 < 4);
3571 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3572 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3574 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3577 if (XVECLEN (trueop1, 0) == 2
3578 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3579 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3580 && GET_CODE (trueop0) == VEC_CONCAT
3581 && GET_MODE (trueop0) == mode)
3583 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3584 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3585 rtx subop0, subop1;
3587 gcc_assert (i0 < 2 && i1 < 2);
3588 subop0 = XEXP (trueop0, i0);
3589 subop1 = XEXP (trueop0, i1);
3591 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3595 if (XVECLEN (trueop1, 0) == 1
3596 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3597 && GET_CODE (trueop0) == VEC_CONCAT)
3599 rtx vec = trueop0;
3600 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3602 /* Try to find the element in the VEC_CONCAT. */
3603 while (GET_MODE (vec) != mode
3604 && GET_CODE (vec) == VEC_CONCAT)
3606 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3607 if (offset < vec_size)
3608 vec = XEXP (vec, 0);
3609 else
3611 offset -= vec_size;
3612 vec = XEXP (vec, 1);
3614 vec = avoid_constant_pool_reference (vec);
3617 if (GET_MODE (vec) == mode)
3618 return vec;
3621 /* If we select elements in a vec_merge that all come from the same
3622 operand, select from that operand directly. */
3623 if (GET_CODE (op0) == VEC_MERGE)
3625 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3626 if (CONST_INT_P (trueop02))
3628 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3629 bool all_operand0 = true;
3630 bool all_operand1 = true;
3631 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3633 rtx j = XVECEXP (trueop1, 0, i);
3634 if (sel & (1 << UINTVAL (j)))
3635 all_operand1 = false;
3636 else
3637 all_operand0 = false;
3639 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3640 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3641 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3642 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3646 return 0;
3647 case VEC_CONCAT:
3649 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3650 ? GET_MODE (trueop0)
3651 : GET_MODE_INNER (mode));
3652 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3653 ? GET_MODE (trueop1)
3654 : GET_MODE_INNER (mode));
3656 gcc_assert (VECTOR_MODE_P (mode));
3657 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3658 == GET_MODE_SIZE (mode));
3660 if (VECTOR_MODE_P (op0_mode))
3661 gcc_assert (GET_MODE_INNER (mode)
3662 == GET_MODE_INNER (op0_mode));
3663 else
3664 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3666 if (VECTOR_MODE_P (op1_mode))
3667 gcc_assert (GET_MODE_INNER (mode)
3668 == GET_MODE_INNER (op1_mode));
3669 else
3670 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3672 if ((GET_CODE (trueop0) == CONST_VECTOR
3673 || CONST_SCALAR_INT_P (trueop0)
3674 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3675 && (GET_CODE (trueop1) == CONST_VECTOR
3676 || CONST_SCALAR_INT_P (trueop1)
3677 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3679 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3680 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3681 rtvec v = rtvec_alloc (n_elts);
3682 unsigned int i;
3683 unsigned in_n_elts = 1;
3685 if (VECTOR_MODE_P (op0_mode))
3686 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3687 for (i = 0; i < n_elts; i++)
3689 if (i < in_n_elts)
3691 if (!VECTOR_MODE_P (op0_mode))
3692 RTVEC_ELT (v, i) = trueop0;
3693 else
3694 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3696 else
3698 if (!VECTOR_MODE_P (op1_mode))
3699 RTVEC_ELT (v, i) = trueop1;
3700 else
3701 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3702 i - in_n_elts);
3706 return gen_rtx_CONST_VECTOR (mode, v);
3709 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3710 Restrict the transformation to avoid generating a VEC_SELECT with a
3711 mode unrelated to its operand. */
3712 if (GET_CODE (trueop0) == VEC_SELECT
3713 && GET_CODE (trueop1) == VEC_SELECT
3714 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3715 && GET_MODE (XEXP (trueop0, 0)) == mode)
3717 rtx par0 = XEXP (trueop0, 1);
3718 rtx par1 = XEXP (trueop1, 1);
3719 int len0 = XVECLEN (par0, 0);
3720 int len1 = XVECLEN (par1, 0);
3721 rtvec vec = rtvec_alloc (len0 + len1);
3722 for (int i = 0; i < len0; i++)
3723 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3724 for (int i = 0; i < len1; i++)
3725 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3726 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3727 gen_rtx_PARALLEL (VOIDmode, vec));
3730 return 0;
3732 default:
3733 gcc_unreachable ();
3736 return 0;
3739 rtx
3740 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3741 rtx op0, rtx op1)
3743 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3744 HOST_WIDE_INT val;
3745 unsigned int width = GET_MODE_PRECISION (mode);
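/* Fold an element-wise operation on two constant vectors by simplifying
   each pair of elements.  */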
3747 if (VECTOR_MODE_P (mode)
3748 && code != VEC_CONCAT
3749 && GET_CODE (op0) == CONST_VECTOR
3750 && GET_CODE (op1) == CONST_VECTOR)
3752 unsigned n_elts = GET_MODE_NUNITS (mode);
3753 enum machine_mode op0mode = GET_MODE (op0);
3754 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3755 enum machine_mode op1mode = GET_MODE (op1);
3756 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3757 rtvec v = rtvec_alloc (n_elts);
3758 unsigned int i;
3760 gcc_assert (op0_n_elts == n_elts);
3761 gcc_assert (op1_n_elts == n_elts);
3762 for (i = 0; i < n_elts; i++)
3764 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3765 CONST_VECTOR_ELT (op0, i),
3766 CONST_VECTOR_ELT (op1, i));
3767 if (!x)
3768 return 0;
3769 RTVEC_ELT (v, i) = x;
3772 return gen_rtx_CONST_VECTOR (mode, v);
3775 if (VECTOR_MODE_P (mode)
3776 && code == VEC_CONCAT
3777 && (CONST_SCALAR_INT_P (op0)
3778 || GET_CODE (op0) == CONST_FIXED
3779 || CONST_DOUBLE_AS_FLOAT_P (op0))
3780 && (CONST_SCALAR_INT_P (op1)
3781 || CONST_DOUBLE_AS_FLOAT_P (op1)
3782 || GET_CODE (op1) == CONST_FIXED))
3784 unsigned n_elts = GET_MODE_NUNITS (mode);
3785 rtvec v = rtvec_alloc (n_elts);
3787 gcc_assert (n_elts >= 2);
3788 if (n_elts == 2)
3790 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3791 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3793 RTVEC_ELT (v, 0) = op0;
3794 RTVEC_ELT (v, 1) = op1;
3796 else
3798 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3799 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3800 unsigned i;
3802 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3803 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3804 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3806 for (i = 0; i < op0_n_elts; ++i)
3807 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3808 for (i = 0; i < op1_n_elts; ++i)
3809 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3812 return gen_rtx_CONST_VECTOR (mode, v);
3815 if (SCALAR_FLOAT_MODE_P (mode)
3816 && CONST_DOUBLE_AS_FLOAT_P (op0)
3817 && CONST_DOUBLE_AS_FLOAT_P (op1)
3818 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3820 if (code == AND
3821 || code == IOR
3822 || code == XOR)
3824 long tmp0[4];
3825 long tmp1[4];
3826 REAL_VALUE_TYPE r;
3827 int i;
3829 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3830 GET_MODE (op0));
3831 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3832 GET_MODE (op1));
3833 for (i = 0; i < 4; i++)
3835 switch (code)
3837 case AND:
3838 tmp0[i] &= tmp1[i];
3839 break;
3840 case IOR:
3841 tmp0[i] |= tmp1[i];
3842 break;
3843 case XOR:
3844 tmp0[i] ^= tmp1[i];
3845 break;
3846 default:
3847 gcc_unreachable ();
3850 real_from_target (&r, tmp0, mode);
3851 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3853 else
3855 REAL_VALUE_TYPE f0, f1, value, result;
3856 bool inexact;
3858 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3859 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3860 real_convert (&f0, mode, &f0);
3861 real_convert (&f1, mode, &f1);
3863 if (HONOR_SNANS (mode)
3864 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3865 return 0;
3867 if (code == DIV
3868 && REAL_VALUES_EQUAL (f1, dconst0)
3869 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3870 return 0;
3872 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3873 && flag_trapping_math
3874 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3876 int s0 = REAL_VALUE_NEGATIVE (f0);
3877 int s1 = REAL_VALUE_NEGATIVE (f1);
3879 switch (code)
3881 case PLUS:
3882 /* Inf + -Inf = NaN plus exception. */
3883 if (s0 != s1)
3884 return 0;
3885 break;
3886 case MINUS:
3887 /* Inf - Inf = NaN plus exception. */
3888 if (s0 == s1)
3889 return 0;
3890 break;
3891 case DIV:
3892 /* Inf / Inf = NaN plus exception. */
3893 return 0;
3894 default:
3895 break;
3899 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3900 && flag_trapping_math
3901 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3902 || (REAL_VALUE_ISINF (f1)
3903 && REAL_VALUES_EQUAL (f0, dconst0))))
3904 /* Inf * 0 = NaN plus exception. */
3905 return 0;
3907 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3908 &f0, &f1);
3909 real_convert (&result, mode, &value);
3911 /* Don't constant fold this floating point operation if
 3912 the result has overflowed and flag_trapping_math is set. */
3914 if (flag_trapping_math
3915 && MODE_HAS_INFINITIES (mode)
3916 && REAL_VALUE_ISINF (result)
3917 && !REAL_VALUE_ISINF (f0)
3918 && !REAL_VALUE_ISINF (f1))
3919 /* Overflow plus exception. */
3920 return 0;
3922 /* Don't constant fold this floating point operation if the
 3923 result may depend upon the run-time rounding mode and
3924 flag_rounding_math is set, or if GCC's software emulation
3925 is unable to accurately represent the result. */
3927 if ((flag_rounding_math
3928 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3929 && (inexact || !real_identical (&result, &value)))
3930 return NULL_RTX;
3932 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3936 /* We can fold some multi-word operations. */
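   /* For instance, a DImode PLUS of two integer constant operands is folded
      here when HOST_WIDE_INT is 32 bits wide, since the full 64-bit result
      still fits in a double_int (illustrative; which modes reach this point
      depends on the host's HOST_WIDE_INT width).  */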
3937 if (GET_MODE_CLASS (mode) == MODE_INT
3938 && width == HOST_BITS_PER_DOUBLE_INT
3939 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3940 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3942 double_int o0, o1, res, tmp;
3943 bool overflow;
3945 o0 = rtx_to_double_int (op0);
3946 o1 = rtx_to_double_int (op1);
3948 switch (code)
3950 case MINUS:
3951 /* A - B == A + (-B). */
3952 o1 = -o1;
3954 /* Fall through.... */
3956 case PLUS:
3957 res = o0 + o1;
3958 break;
3960 case MULT:
3961 res = o0 * o1;
3962 break;
3964 case DIV:
3965 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3966 &tmp, &overflow);
3967 if (overflow)
3968 return 0;
3969 break;
3971 case MOD:
3972 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3973 &res, &overflow);
3974 if (overflow)
3975 return 0;
3976 break;
3978 case UDIV:
3979 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3980 &tmp, &overflow);
3981 if (overflow)
3982 return 0;
3983 break;
3985 case UMOD:
3986 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3987 &res, &overflow);
3988 if (overflow)
3989 return 0;
3990 break;
3992 case AND:
3993 res = o0 & o1;
3994 break;
3996 case IOR:
3997 res = o0 | o1;
3998 break;
4000 case XOR:
4001 res = o0 ^ o1;
4002 break;
4004 case SMIN:
4005 res = o0.smin (o1);
4006 break;
4008 case SMAX:
4009 res = o0.smax (o1);
4010 break;
4012 case UMIN:
4013 res = o0.umin (o1);
4014 break;
4016 case UMAX:
4017 res = o0.umax (o1);
4018 break;
4020 case LSHIFTRT: case ASHIFTRT:
4021 case ASHIFT:
4022 case ROTATE: case ROTATERT:
4024 unsigned HOST_WIDE_INT cnt;
4026 if (SHIFT_COUNT_TRUNCATED)
4028 o1.high = 0;
4029 o1.low &= GET_MODE_PRECISION (mode) - 1;
4032 if (!o1.fits_uhwi ()
4033 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4034 return 0;
4036 cnt = o1.to_uhwi ();
4037 unsigned short prec = GET_MODE_PRECISION (mode);
4039 if (code == LSHIFTRT || code == ASHIFTRT)
4040 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4041 else if (code == ASHIFT)
4042 res = o0.alshift (cnt, prec);
4043 else if (code == ROTATE)
4044 res = o0.lrotate (cnt, prec);
4045 else /* code == ROTATERT */
4046 res = o0.rrotate (cnt, prec);
4048 break;
4050 default:
4051 return 0;
4054 return immed_double_int_const (res, mode);
4057 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4058 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4060 /* Get the integer argument values in two forms:
4061 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4063 arg0 = INTVAL (op0);
4064 arg1 = INTVAL (op1);
4066 if (width < HOST_BITS_PER_WIDE_INT)
4068 arg0 &= GET_MODE_MASK (mode);
4069 arg1 &= GET_MODE_MASK (mode);
4071 arg0s = arg0;
4072 if (val_signbit_known_set_p (mode, arg0s))
4073 arg0s |= ~GET_MODE_MASK (mode);
4075 arg1s = arg1;
4076 if (val_signbit_known_set_p (mode, arg1s))
4077 arg1s |= ~GET_MODE_MASK (mode);
4079 else
4081 arg0s = arg0;
4082 arg1s = arg1;
4085 /* Compute the value of the arithmetic. */
4087 switch (code)
4089 case PLUS:
4090 val = arg0s + arg1s;
4091 break;
4093 case MINUS:
4094 val = arg0s - arg1s;
4095 break;
4097 case MULT:
4098 val = arg0s * arg1s;
4099 break;
4101 case DIV:
4102 if (arg1s == 0
4103 || ((unsigned HOST_WIDE_INT) arg0s
4104 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4105 && arg1s == -1))
4106 return 0;
4107 val = arg0s / arg1s;
4108 break;
4110 case MOD:
4111 if (arg1s == 0
4112 || ((unsigned HOST_WIDE_INT) arg0s
4113 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4114 && arg1s == -1))
4115 return 0;
4116 val = arg0s % arg1s;
4117 break;
4119 case UDIV:
4120 if (arg1 == 0
4121 || ((unsigned HOST_WIDE_INT) arg0s
4122 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4123 && arg1s == -1))
4124 return 0;
4125 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4126 break;
4128 case UMOD:
4129 if (arg1 == 0
4130 || ((unsigned HOST_WIDE_INT) arg0s
4131 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4132 && arg1s == -1))
4133 return 0;
4134 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4135 break;
4137 case AND:
4138 val = arg0 & arg1;
4139 break;
4141 case IOR:
4142 val = arg0 | arg1;
4143 break;
4145 case XOR:
4146 val = arg0 ^ arg1;
4147 break;
4149 case LSHIFTRT:
4150 case ASHIFT:
4151 case ASHIFTRT:
4152 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4153 the value is in range. We can't return any old value for
4154 out-of-range arguments because either the middle-end (via
4155 shift_truncation_mask) or the back-end might be relying on
4156 target-specific knowledge. Nor can we rely on
4157 shift_truncation_mask, since the shift might not be part of an
4158 ashlM3, lshrM3 or ashrM3 instruction. */
4159 if (SHIFT_COUNT_TRUNCATED)
4160 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4161 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4162 return 0;
4164 val = (code == ASHIFT
4165 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4166 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4168 /* Sign-extend the result for arithmetic right shifts. */
4169 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4170 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4171 break;
4173 case ROTATERT:
4174 if (arg1 < 0)
4175 return 0;
4177 arg1 %= width;
4178 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4179 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4180 break;
4182 case ROTATE:
4183 if (arg1 < 0)
4184 return 0;
4186 arg1 %= width;
4187 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4188 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4189 break;
4191 case COMPARE:
4192 /* Do nothing here. */
4193 return 0;
4195 case SMIN:
4196 val = arg0s <= arg1s ? arg0s : arg1s;
4197 break;
4199 case UMIN:
4200 val = ((unsigned HOST_WIDE_INT) arg0
4201 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4202 break;
4204 case SMAX:
4205 val = arg0s > arg1s ? arg0s : arg1s;
4206 break;
4208 case UMAX:
4209 val = ((unsigned HOST_WIDE_INT) arg0
4210 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4211 break;
4213 case SS_PLUS:
4214 case US_PLUS:
4215 case SS_MINUS:
4216 case US_MINUS:
4217 case SS_MULT:
4218 case US_MULT:
4219 case SS_DIV:
4220 case US_DIV:
4221 case SS_ASHIFT:
4222 case US_ASHIFT:
4223 /* ??? There are simplifications that can be done. */
4224 return 0;
4226 default:
4227 gcc_unreachable ();
4230 return gen_int_mode (val, mode);
4233 return NULL_RTX;
4238 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4239 PLUS or MINUS.
 4241 Rather than test for specific cases, we do this by a brute-force method
4242 and do all possible simplifications until no more changes occur. Then
4243 we rebuild the operation. */
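/* For example (operand names are arbitrary), (minus (plus a b) a) expands
   into the operand list {a, b, -a}; the a and -a entries cancel in the
   combination loop below, and the result is rebuilt as just b.  */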
4245 struct simplify_plus_minus_op_data
4247 rtx op;
4248 short neg;
4251 static bool
4252 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4254 int result;
4256 result = (commutative_operand_precedence (y)
4257 - commutative_operand_precedence (x));
4258 if (result)
4259 return result > 0;
4261 /* Group together equal REGs to do more simplification. */
4262 if (REG_P (x) && REG_P (y))
4263 return REGNO (x) > REGNO (y);
4264 else
4265 return false;
4268 static rtx
4269 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4270 rtx op1)
4272 struct simplify_plus_minus_op_data ops[8];
4273 rtx result, tem;
4274 int n_ops = 2, input_ops = 2;
4275 int changed, n_constants = 0, canonicalized = 0;
4276 int i, j;
4278 memset (ops, 0, sizeof ops);
4280 /* Set up the two operands and then expand them until nothing has been
4281 changed. If we run out of room in our array, give up; this should
4282 almost never happen. */
4284 ops[0].op = op0;
4285 ops[0].neg = 0;
4286 ops[1].op = op1;
4287 ops[1].neg = (code == MINUS);
4291 changed = 0;
4293 for (i = 0; i < n_ops; i++)
4295 rtx this_op = ops[i].op;
4296 int this_neg = ops[i].neg;
4297 enum rtx_code this_code = GET_CODE (this_op);
4299 switch (this_code)
4301 case PLUS:
4302 case MINUS:
4303 if (n_ops == 7)
4304 return NULL_RTX;
4306 ops[n_ops].op = XEXP (this_op, 1);
4307 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4308 n_ops++;
4310 ops[i].op = XEXP (this_op, 0);
4311 input_ops++;
4312 changed = 1;
4313 canonicalized |= this_neg;
4314 break;
4316 case NEG:
4317 ops[i].op = XEXP (this_op, 0);
4318 ops[i].neg = ! this_neg;
4319 changed = 1;
4320 canonicalized = 1;
4321 break;
4323 case CONST:
4324 if (n_ops < 7
4325 && GET_CODE (XEXP (this_op, 0)) == PLUS
4326 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4327 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4329 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4330 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4331 ops[n_ops].neg = this_neg;
4332 n_ops++;
4333 changed = 1;
4334 canonicalized = 1;
4336 break;
4338 case NOT:
4339 /* ~a -> (-a - 1) */
4340 if (n_ops != 7)
4342 ops[n_ops].op = CONSTM1_RTX (mode);
4343 ops[n_ops++].neg = this_neg;
4344 ops[i].op = XEXP (this_op, 0);
4345 ops[i].neg = !this_neg;
4346 changed = 1;
4347 canonicalized = 1;
4349 break;
4351 case CONST_INT:
4352 n_constants++;
4353 if (this_neg)
4355 ops[i].op = neg_const_int (mode, this_op);
4356 ops[i].neg = 0;
4357 changed = 1;
4358 canonicalized = 1;
4360 break;
4362 default:
4363 break;
4367 while (changed);
4369 if (n_constants > 1)
4370 canonicalized = 1;
4372 gcc_assert (n_ops >= 2);
4374 /* If we only have two operands, we can avoid the loops. */
4375 if (n_ops == 2)
4377 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4378 rtx lhs, rhs;
4380 /* Get the two operands. Be careful with the order, especially for
4381 the cases where code == MINUS. */
4382 if (ops[0].neg && ops[1].neg)
4384 lhs = gen_rtx_NEG (mode, ops[0].op);
4385 rhs = ops[1].op;
4387 else if (ops[0].neg)
4389 lhs = ops[1].op;
4390 rhs = ops[0].op;
4392 else
4394 lhs = ops[0].op;
4395 rhs = ops[1].op;
4398 return simplify_const_binary_operation (code, mode, lhs, rhs);
4401 /* Now simplify each pair of operands until nothing changes. */
4404 /* Insertion sort is good enough for an eight-element array. */
4405 for (i = 1; i < n_ops; i++)
4407 struct simplify_plus_minus_op_data save;
4408 j = i - 1;
4409 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4410 continue;
4412 canonicalized = 1;
4413 save = ops[i];
4415 ops[j + 1] = ops[j];
4416 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4417 ops[j + 1] = save;
4420 changed = 0;
4421 for (i = n_ops - 1; i > 0; i--)
4422 for (j = i - 1; j >= 0; j--)
4424 rtx lhs = ops[j].op, rhs = ops[i].op;
4425 int lneg = ops[j].neg, rneg = ops[i].neg;
4427 if (lhs != 0 && rhs != 0)
4429 enum rtx_code ncode = PLUS;
4431 if (lneg != rneg)
4433 ncode = MINUS;
4434 if (lneg)
4435 tem = lhs, lhs = rhs, rhs = tem;
4437 else if (swap_commutative_operands_p (lhs, rhs))
4438 tem = lhs, lhs = rhs, rhs = tem;
4440 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4441 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4443 rtx tem_lhs, tem_rhs;
4445 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4446 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4447 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4449 if (tem && !CONSTANT_P (tem))
4450 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4452 else
4453 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4455 /* Reject "simplifications" that just wrap the two
4456 arguments in a CONST. Failure to do so can result
4457 in infinite recursion with simplify_binary_operation
4458 when it calls us to simplify CONST operations. */
4459 if (tem
4460 && ! (GET_CODE (tem) == CONST
4461 && GET_CODE (XEXP (tem, 0)) == ncode
4462 && XEXP (XEXP (tem, 0), 0) == lhs
4463 && XEXP (XEXP (tem, 0), 1) == rhs))
4465 lneg &= rneg;
4466 if (GET_CODE (tem) == NEG)
4467 tem = XEXP (tem, 0), lneg = !lneg;
4468 if (CONST_INT_P (tem) && lneg)
4469 tem = neg_const_int (mode, tem), lneg = 0;
4471 ops[i].op = tem;
4472 ops[i].neg = lneg;
4473 ops[j].op = NULL_RTX;
4474 changed = 1;
4475 canonicalized = 1;
4480 /* If nothing changed, fail. */
4481 if (!canonicalized)
4482 return NULL_RTX;
4484 /* Pack all the operands to the lower-numbered entries. */
4485 for (i = 0, j = 0; j < n_ops; j++)
4486 if (ops[j].op)
4488 ops[i] = ops[j];
4489 i++;
4491 n_ops = i;
4493 while (changed);
4495 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4496 if (n_ops == 2
4497 && CONST_INT_P (ops[1].op)
4498 && CONSTANT_P (ops[0].op)
4499 && ops[0].neg)
4500 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4502 /* We suppressed creation of trivial CONST expressions in the
4503 combination loop to avoid recursion. Create one manually now.
4504 The combination loop should have ensured that there is exactly
4505 one CONST_INT, and the sort will have ensured that it is last
4506 in the array and that any other constant will be next-to-last. */
4508 if (n_ops > 1
4509 && CONST_INT_P (ops[n_ops - 1].op)
4510 && CONSTANT_P (ops[n_ops - 2].op))
4512 rtx value = ops[n_ops - 1].op;
4513 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4514 value = neg_const_int (mode, value);
4515 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4516 INTVAL (value));
4517 n_ops--;
4520 /* Put a non-negated operand first, if possible. */
4522 for (i = 0; i < n_ops && ops[i].neg; i++)
4523 continue;
4524 if (i == n_ops)
4525 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4526 else if (i != 0)
4528 tem = ops[0].op;
4529 ops[0] = ops[i];
4530 ops[i].op = tem;
4531 ops[i].neg = 1;
4534 /* Now make the result by performing the requested operations. */
4535 result = ops[0].op;
4536 for (i = 1; i < n_ops; i++)
4537 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4538 mode, result, ops[i].op);
4540 return result;
4543 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4544 static bool
4545 plus_minus_operand_p (const_rtx x)
4547 return GET_CODE (x) == PLUS
4548 || GET_CODE (x) == MINUS
4549 || (GET_CODE (x) == CONST
4550 && GET_CODE (XEXP (x, 0)) == PLUS
4551 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4552 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4555 /* Like simplify_binary_operation except used for relational operators.
 4556 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 4557 not both be VOIDmode.
 4559 CMP_MODE specifies the mode in which the comparison is done, so it is
4560 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4561 the operands or, if both are VOIDmode, the operands are compared in
4562 "infinite precision". */
4564 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4565 enum machine_mode cmp_mode, rtx op0, rtx op1)
4567 rtx tem, trueop0, trueop1;
4569 if (cmp_mode == VOIDmode)
4570 cmp_mode = GET_MODE (op0);
4571 if (cmp_mode == VOIDmode)
4572 cmp_mode = GET_MODE (op1);
4574 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4575 if (tem)
4577 if (SCALAR_FLOAT_MODE_P (mode))
4579 if (tem == const0_rtx)
4580 return CONST0_RTX (mode);
4581 #ifdef FLOAT_STORE_FLAG_VALUE
4583 REAL_VALUE_TYPE val;
4584 val = FLOAT_STORE_FLAG_VALUE (mode);
4585 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4587 #else
4588 return NULL_RTX;
4589 #endif
4591 if (VECTOR_MODE_P (mode))
4593 if (tem == const0_rtx)
4594 return CONST0_RTX (mode);
4595 #ifdef VECTOR_STORE_FLAG_VALUE
4597 int i, units;
4598 rtvec v;
4600 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4601 if (val == NULL_RTX)
4602 return NULL_RTX;
4603 if (val == const1_rtx)
4604 return CONST1_RTX (mode);
4606 units = GET_MODE_NUNITS (mode);
4607 v = rtvec_alloc (units);
4608 for (i = 0; i < units; i++)
4609 RTVEC_ELT (v, i) = val;
4610 return gen_rtx_raw_CONST_VECTOR (mode, v);
4612 #else
4613 return NULL_RTX;
4614 #endif
4617 return tem;
4620 /* For the following tests, ensure const0_rtx is op1. */
4621 if (swap_commutative_operands_p (op0, op1)
4622 || (op0 == const0_rtx && op1 != const0_rtx))
4623 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4625 /* If op0 is a compare, extract the comparison arguments from it. */
4626 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4627 return simplify_gen_relational (code, mode, VOIDmode,
4628 XEXP (op0, 0), XEXP (op0, 1));
4630 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4631 || CC0_P (op0))
4632 return NULL_RTX;
4634 trueop0 = avoid_constant_pool_reference (op0);
4635 trueop1 = avoid_constant_pool_reference (op1);
4636 return simplify_relational_operation_1 (code, mode, cmp_mode,
4637 trueop0, trueop1);
4640 /* This part of simplify_relational_operation is only used when CMP_MODE
4641 is not in class MODE_CC (i.e. it is a real comparison).
 4643 MODE is the mode of the result, while CMP_MODE specifies the mode
 4644 in which the comparison is done, so it is the mode of the operands. */
4646 static rtx
4647 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4648 enum machine_mode cmp_mode, rtx op0, rtx op1)
4650 enum rtx_code op0code = GET_CODE (op0);
4652 if (op1 == const0_rtx && COMPARISON_P (op0))
4654 /* If op0 is a comparison, extract the comparison arguments
4655 from it. */
4656 if (code == NE)
4658 if (GET_MODE (op0) == mode)
4659 return simplify_rtx (op0);
4660 else
4661 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4662 XEXP (op0, 0), XEXP (op0, 1));
4664 else if (code == EQ)
4666 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4667 if (new_code != UNKNOWN)
4668 return simplify_gen_relational (new_code, mode, VOIDmode,
4669 XEXP (op0, 0), XEXP (op0, 1));
4673 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4674 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
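  /* An illustrative instance: (ltu (plus a (const_int 4)) (const_int 4))
     becomes (geu a (const_int -4)); the sum wraps below 4 exactly when a
     is at least -4 viewed as unsigned.  */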
4675 if ((code == LTU || code == GEU)
4676 && GET_CODE (op0) == PLUS
4677 && CONST_INT_P (XEXP (op0, 1))
4678 && (rtx_equal_p (op1, XEXP (op0, 0))
4679 || rtx_equal_p (op1, XEXP (op0, 1)))
4680 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4681 && XEXP (op0, 1) != const0_rtx)
4683 rtx new_cmp
4684 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4685 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4686 cmp_mode, XEXP (op0, 0), new_cmp);
4689 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4690 if ((code == LTU || code == GEU)
4691 && GET_CODE (op0) == PLUS
4692 && rtx_equal_p (op1, XEXP (op0, 1))
4693 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4694 && !rtx_equal_p (op1, XEXP (op0, 0)))
4695 return simplify_gen_relational (code, mode, cmp_mode, op0,
4696 copy_rtx (XEXP (op0, 0)));
4698 if (op1 == const0_rtx)
4700 /* Canonicalize (GTU x 0) as (NE x 0). */
4701 if (code == GTU)
4702 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4703 /* Canonicalize (LEU x 0) as (EQ x 0). */
4704 if (code == LEU)
4705 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4707 else if (op1 == const1_rtx)
4709 switch (code)
4711 case GE:
4712 /* Canonicalize (GE x 1) as (GT x 0). */
4713 return simplify_gen_relational (GT, mode, cmp_mode,
4714 op0, const0_rtx);
4715 case GEU:
4716 /* Canonicalize (GEU x 1) as (NE x 0). */
4717 return simplify_gen_relational (NE, mode, cmp_mode,
4718 op0, const0_rtx);
4719 case LT:
4720 /* Canonicalize (LT x 1) as (LE x 0). */
4721 return simplify_gen_relational (LE, mode, cmp_mode,
4722 op0, const0_rtx);
4723 case LTU:
4724 /* Canonicalize (LTU x 1) as (EQ x 0). */
4725 return simplify_gen_relational (EQ, mode, cmp_mode,
4726 op0, const0_rtx);
4727 default:
4728 break;
4731 else if (op1 == constm1_rtx)
4733 /* Canonicalize (LE x -1) as (LT x 0). */
4734 if (code == LE)
4735 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4736 /* Canonicalize (GT x -1) as (GE x 0). */
4737 if (code == GT)
4738 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4741 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4742 if ((code == EQ || code == NE)
4743 && (op0code == PLUS || op0code == MINUS)
4744 && CONSTANT_P (op1)
4745 && CONSTANT_P (XEXP (op0, 1))
4746 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4748 rtx x = XEXP (op0, 0);
4749 rtx c = XEXP (op0, 1);
4750 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4751 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
 4753 /* Detect an infinitely recursive condition, where we oscillate in this
4754 simplification case between:
4755 A + B == C <---> C - B == A,
4756 where A, B, and C are all constants with non-simplifiable expressions,
4757 usually SYMBOL_REFs. */
4758 if (GET_CODE (tem) == invcode
4759 && CONSTANT_P (x)
4760 && rtx_equal_p (c, XEXP (tem, 1)))
4761 return NULL_RTX;
4763 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4766 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4767 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4768 if (code == NE
4769 && op1 == const0_rtx
4770 && GET_MODE_CLASS (mode) == MODE_INT
4771 && cmp_mode != VOIDmode
4772 /* ??? Work-around BImode bugs in the ia64 backend. */
4773 && mode != BImode
4774 && cmp_mode != BImode
4775 && nonzero_bits (op0, cmp_mode) == 1
4776 && STORE_FLAG_VALUE == 1)
4777 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4778 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4779 : lowpart_subreg (mode, op0, cmp_mode);
4781 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4782 if ((code == EQ || code == NE)
4783 && op1 == const0_rtx
4784 && op0code == XOR)
4785 return simplify_gen_relational (code, mode, cmp_mode,
4786 XEXP (op0, 0), XEXP (op0, 1));
4788 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4789 if ((code == EQ || code == NE)
4790 && op0code == XOR
4791 && rtx_equal_p (XEXP (op0, 0), op1)
4792 && !side_effects_p (XEXP (op0, 0)))
4793 return simplify_gen_relational (code, mode, cmp_mode,
4794 XEXP (op0, 1), const0_rtx);
4796 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4797 if ((code == EQ || code == NE)
4798 && op0code == XOR
4799 && rtx_equal_p (XEXP (op0, 1), op1)
4800 && !side_effects_p (XEXP (op0, 1)))
4801 return simplify_gen_relational (code, mode, cmp_mode,
4802 XEXP (op0, 0), const0_rtx);
4804 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4805 if ((code == EQ || code == NE)
4806 && op0code == XOR
4807 && CONST_SCALAR_INT_P (op1)
4808 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4809 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4810 simplify_gen_binary (XOR, cmp_mode,
4811 XEXP (op0, 1), op1));
4813 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4814 if ((code == EQ || code == NE)
4815 && GET_CODE (op0) == BSWAP
4816 && CONST_SCALAR_INT_P (op1))
4817 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4818 simplify_gen_unary (BSWAP, cmp_mode,
4819 op1, cmp_mode));
4821 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4822 if ((code == EQ || code == NE)
4823 && GET_CODE (op0) == BSWAP
4824 && GET_CODE (op1) == BSWAP)
4825 return simplify_gen_relational (code, mode, cmp_mode,
4826 XEXP (op0, 0), XEXP (op1, 0));
4828 if (op0code == POPCOUNT && op1 == const0_rtx)
4829 switch (code)
4831 case EQ:
4832 case LE:
4833 case LEU:
4834 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4835 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4836 XEXP (op0, 0), const0_rtx);
4838 case NE:
4839 case GT:
4840 case GTU:
4841 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4842 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4843 XEXP (op0, 0), const0_rtx);
4845 default:
4846 break;
4849 return NULL_RTX;
4852 enum
4854 CMP_EQ = 1,
4855 CMP_LT = 2,
4856 CMP_GT = 4,
4857 CMP_LTU = 8,
4858 CMP_GTU = 16
4862 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
 4863 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4864 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4865 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4866 For floating-point comparisons, assume that the operands were ordered. */
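/* For example, comparison_result (LE, CMP_EQ) is const_true_rtx while
   comparison_result (LT, CMP_EQ) is const0_rtx: operands known to be
   equal satisfy LE but not LT.  */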
4868 static rtx
4869 comparison_result (enum rtx_code code, int known_results)
4871 switch (code)
4873 case EQ:
4874 case UNEQ:
4875 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4876 case NE:
4877 case LTGT:
4878 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4880 case LT:
4881 case UNLT:
4882 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4883 case GE:
4884 case UNGE:
4885 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4887 case GT:
4888 case UNGT:
4889 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4890 case LE:
4891 case UNLE:
4892 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4894 case LTU:
4895 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4896 case GEU:
4897 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4899 case GTU:
4900 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4901 case LEU:
4902 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4904 case ORDERED:
4905 return const_true_rtx;
4906 case UNORDERED:
4907 return const0_rtx;
4908 default:
4909 gcc_unreachable ();
4913 /* Check if the given comparison (done in the given MODE) is actually a
4914 tautology or a contradiction.
4915 If no simplification is possible, this function returns zero.
4916 Otherwise, it returns either const_true_rtx or const0_rtx. */
4919 simplify_const_relational_operation (enum rtx_code code,
4920 enum machine_mode mode,
4921 rtx op0, rtx op1)
4923 rtx tem;
4924 rtx trueop0;
4925 rtx trueop1;
4927 gcc_assert (mode != VOIDmode
4928 || (GET_MODE (op0) == VOIDmode
4929 && GET_MODE (op1) == VOIDmode));
4931 /* If op0 is a compare, extract the comparison arguments from it. */
4932 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4934 op1 = XEXP (op0, 1);
4935 op0 = XEXP (op0, 0);
4937 if (GET_MODE (op0) != VOIDmode)
4938 mode = GET_MODE (op0);
4939 else if (GET_MODE (op1) != VOIDmode)
4940 mode = GET_MODE (op1);
4941 else
4942 return 0;
4945 /* We can't simplify MODE_CC values since we don't know what the
4946 actual comparison is. */
4947 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4948 return 0;
4950 /* Make sure the constant is second. */
4951 if (swap_commutative_operands_p (op0, op1))
4953 tem = op0, op0 = op1, op1 = tem;
4954 code = swap_condition (code);
4957 trueop0 = avoid_constant_pool_reference (op0);
4958 trueop1 = avoid_constant_pool_reference (op1);
4960 /* For integer comparisons of A and B maybe we can simplify A - B and can
4961 then simplify a comparison of that with zero. If A and B are both either
4962 a register or a CONST_INT, this can't help; testing for these cases will
4963 prevent infinite recursion here and speed things up.
4965 We can only do this for EQ and NE comparisons as otherwise we may
4966 lose or introduce overflow which we cannot disregard as undefined as
4967 we do not know the signedness of the operation on either the left or
4968 the right hand side of the comparison. */
4970 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4971 && (code == EQ || code == NE)
4972 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4973 && (REG_P (op1) || CONST_INT_P (trueop1)))
4974 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4975 /* We cannot do this if tem is a nonzero address. */
4976 && ! nonzero_address_p (tem))
4977 return simplify_const_relational_operation (signed_condition (code),
4978 mode, tem, const0_rtx);
4980 if (! HONOR_NANS (mode) && code == ORDERED)
4981 return const_true_rtx;
4983 if (! HONOR_NANS (mode) && code == UNORDERED)
4984 return const0_rtx;
4986 /* For modes without NaNs, if the two operands are equal, we know the
4987 result except if they have side-effects. Even with NaNs we know
4988 the result of unordered comparisons and, if signaling NaNs are
4989 irrelevant, also the result of LT/GT/LTGT. */
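  /* For example, (unle x x) folds to const_true_rtx even if x might be a
     NaN, and (lt x x) folds to const0_rtx when signaling NaNs need not be
     honored, since LT is false both for equal and for unordered operands
     (assuming x has no side effects).  */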
4990 if ((! HONOR_NANS (GET_MODE (trueop0))
4991 || code == UNEQ || code == UNLE || code == UNGE
4992 || ((code == LT || code == GT || code == LTGT)
4993 && ! HONOR_SNANS (GET_MODE (trueop0))))
4994 && rtx_equal_p (trueop0, trueop1)
4995 && ! side_effects_p (trueop0))
4996 return comparison_result (code, CMP_EQ);
4998 /* If the operands are floating-point constants, see if we can fold
4999 the result. */
5000 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5001 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5002 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5004 REAL_VALUE_TYPE d0, d1;
5006 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5007 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5009 /* Comparisons are unordered iff at least one of the values is NaN. */
5010 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5011 switch (code)
5013 case UNEQ:
5014 case UNLT:
5015 case UNGT:
5016 case UNLE:
5017 case UNGE:
5018 case NE:
5019 case UNORDERED:
5020 return const_true_rtx;
5021 case EQ:
5022 case LT:
5023 case GT:
5024 case LE:
5025 case GE:
5026 case LTGT:
5027 case ORDERED:
5028 return const0_rtx;
5029 default:
5030 return 0;
5033 return comparison_result (code,
5034 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5035 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5038 /* Otherwise, see if the operands are both integers. */
5039 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5040 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5041 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5043 int width = GET_MODE_PRECISION (mode);
5044 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5045 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5047 /* Get the two words comprising each integer constant. */
5048 if (CONST_DOUBLE_AS_INT_P (trueop0))
5050 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5051 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5053 else
5055 l0u = l0s = INTVAL (trueop0);
5056 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5059 if (CONST_DOUBLE_AS_INT_P (trueop1))
5061 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5062 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5064 else
5066 l1u = l1s = INTVAL (trueop1);
5067 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5070 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5071 we have to sign or zero-extend the values. */
5072 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5074 l0u &= GET_MODE_MASK (mode);
5075 l1u &= GET_MODE_MASK (mode);
5077 if (val_signbit_known_set_p (mode, l0s))
5078 l0s |= ~GET_MODE_MASK (mode);
5080 if (val_signbit_known_set_p (mode, l1s))
5081 l1s |= ~GET_MODE_MASK (mode);
5083 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5084 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5086 if (h0u == h1u && l0u == l1u)
5087 return comparison_result (code, CMP_EQ);
5088 else
5090 int cr;
5091 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5092 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5093 return comparison_result (code, cr);
5097 /* Optimize comparisons with upper and lower bounds. */
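  /* Illustrative instance: if nonzero_bits shows that only the low four
     bits of an SImode trueop0 can be set, then mmin = 0 and mmax = 15, so
     (gtu trueop0 (const_int 15)) folds to const0_rtx and
     (leu trueop0 (const_int 15)) folds to const_true_rtx.  */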
5098 if (HWI_COMPUTABLE_MODE_P (mode)
5099 && CONST_INT_P (trueop1))
5101 int sign;
5102 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5103 HOST_WIDE_INT val = INTVAL (trueop1);
5104 HOST_WIDE_INT mmin, mmax;
5106 if (code == GEU
5107 || code == LEU
5108 || code == GTU
5109 || code == LTU)
5110 sign = 0;
5111 else
5112 sign = 1;
5114 /* Get a reduced range if the sign bit is zero. */
5115 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5117 mmin = 0;
5118 mmax = nonzero;
5120 else
5122 rtx mmin_rtx, mmax_rtx;
5123 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5125 mmin = INTVAL (mmin_rtx);
5126 mmax = INTVAL (mmax_rtx);
5127 if (sign)
5129 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5131 mmin >>= (sign_copies - 1);
5132 mmax >>= (sign_copies - 1);
5136 switch (code)
5138 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5139 case GEU:
5140 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5141 return const_true_rtx;
5142 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5143 return const0_rtx;
5144 break;
5145 case GE:
5146 if (val <= mmin)
5147 return const_true_rtx;
5148 if (val > mmax)
5149 return const0_rtx;
5150 break;
5152 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5153 case LEU:
5154 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5155 return const_true_rtx;
5156 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5157 return const0_rtx;
5158 break;
5159 case LE:
5160 if (val >= mmax)
5161 return const_true_rtx;
5162 if (val < mmin)
5163 return const0_rtx;
5164 break;
5166 case EQ:
5167 /* x == y is always false for y out of range. */
5168 if (val < mmin || val > mmax)
5169 return const0_rtx;
5170 break;
5172 /* x > y is always false for y >= mmax, always true for y < mmin. */
5173 case GTU:
5174 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5175 return const0_rtx;
5176 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5177 return const_true_rtx;
5178 break;
5179 case GT:
5180 if (val >= mmax)
5181 return const0_rtx;
5182 if (val < mmin)
5183 return const_true_rtx;
5184 break;
5186 /* x < y is always false for y <= mmin, always true for y > mmax. */
5187 case LTU:
5188 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5189 return const0_rtx;
5190 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5191 return const_true_rtx;
5192 break;
5193 case LT:
5194 if (val <= mmin)
5195 return const0_rtx;
5196 if (val > mmax)
5197 return const_true_rtx;
5198 break;
5200 case NE:
5201 /* x != y is always true for y out of range. */
5202 if (val < mmin || val > mmax)
5203 return const_true_rtx;
5204 break;
5206 default:
5207 break;
5211 /* Optimize integer comparisons with zero. */
5212 if (trueop1 == const0_rtx)
5214 /* Some addresses are known to be nonzero. We don't know
5215 their sign, but equality comparisons are known. */
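      /* For example, if trueop0 is a non-weak (symbol_ref "foo"), then
         nonzero_address_p is true, so (eq trueop0 (const_int 0)) folds to
         const0_rtx and (ne trueop0 (const_int 0)) to const_true_rtx
         (illustrative; the symbol name is arbitrary).  */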
5216 if (nonzero_address_p (trueop0))
5218 if (code == EQ || code == LEU)
5219 return const0_rtx;
5220 if (code == NE || code == GTU)
5221 return const_true_rtx;
5224 /* See if the first operand is an IOR with a constant. If so, we
5225 may be able to determine the result of this comparison. */
5226 if (GET_CODE (op0) == IOR)
5228 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5229 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5231 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5232 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5233 && (UINTVAL (inner_const)
5234 & ((unsigned HOST_WIDE_INT) 1
5235 << sign_bitnum)));
5237 switch (code)
5239 case EQ:
5240 case LEU:
5241 return const0_rtx;
5242 case NE:
5243 case GTU:
5244 return const_true_rtx;
5245 case LT:
5246 case LE:
5247 if (has_sign)
5248 return const_true_rtx;
5249 break;
5250 case GT:
5251 case GE:
5252 if (has_sign)
5253 return const0_rtx;
5254 break;
5255 default:
5256 break;
5262 /* Optimize comparison of ABS with zero. */
5263 if (trueop1 == CONST0_RTX (mode)
5264 && (GET_CODE (trueop0) == ABS
5265 || (GET_CODE (trueop0) == FLOAT_EXTEND
5266 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5268 switch (code)
5270 case LT:
5271 /* Optimize abs(x) < 0.0. */
5272 if (!HONOR_SNANS (mode)
5273 && (!INTEGRAL_MODE_P (mode)
5274 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5276 if (INTEGRAL_MODE_P (mode)
5277 && (issue_strict_overflow_warning
5278 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5279 warning (OPT_Wstrict_overflow,
5280 ("assuming signed overflow does not occur when "
5281 "assuming abs (x) < 0 is false"));
5282 return const0_rtx;
5284 break;
5286 case GE:
5287 /* Optimize abs(x) >= 0.0. */
5288 if (!HONOR_NANS (mode)
5289 && (!INTEGRAL_MODE_P (mode)
5290 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5292 if (INTEGRAL_MODE_P (mode)
5293 && (issue_strict_overflow_warning
5294 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5295 warning (OPT_Wstrict_overflow,
5296 ("assuming signed overflow does not occur when "
5297 "assuming abs (x) >= 0 is true"));
5298 return const_true_rtx;
5300 break;
5302 case UNGE:
5303 /* Optimize ! (abs(x) < 0.0). */
5304 return const_true_rtx;
5306 default:
5307 break;
5311 return 0;
5314 /* Simplify CODE, an operation with result mode MODE and three operands,
5315 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
 5316 a constant. Return 0 if no simplification is possible. */
5319 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5320 enum machine_mode op0_mode, rtx op0, rtx op1,
5321 rtx op2)
5323 unsigned int width = GET_MODE_PRECISION (mode);
5324 bool any_change = false;
5325 rtx tem, trueop2;
5327 /* VOIDmode means "infinite" precision. */
5328 if (width == 0)
5329 width = HOST_BITS_PER_WIDE_INT;
5331 switch (code)
5333 case FMA:
5334 /* Simplify negations around the multiplication. */
5335 /* -a * -b + c => a * b + c. */
5336 if (GET_CODE (op0) == NEG)
5338 tem = simplify_unary_operation (NEG, mode, op1, mode);
5339 if (tem)
5340 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5342 else if (GET_CODE (op1) == NEG)
5344 tem = simplify_unary_operation (NEG, mode, op0, mode);
5345 if (tem)
5346 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5349 /* Canonicalize the two multiplication operands. */
5350 /* a * -b + c => -b * a + c. */
5351 if (swap_commutative_operands_p (op0, op1))
5352 tem = op0, op0 = op1, op1 = tem, any_change = true;
5354 if (any_change)
5355 return gen_rtx_FMA (mode, op0, op1, op2);
5356 return NULL_RTX;
5358 case SIGN_EXTRACT:
5359 case ZERO_EXTRACT:
5360 if (CONST_INT_P (op0)
5361 && CONST_INT_P (op1)
5362 && CONST_INT_P (op2)
5363 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5364 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5366 /* Extracting a bit-field from a constant */
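          /* Illustrative instance (with !BITS_BIG_ENDIAN):
             (zero_extract (const_int 0xf0) (const_int 4) (const_int 4))
             extracts the four bits starting at bit 4 and yields
             (const_int 15); the same field via sign_extract yields
             (const_int -1), since the top bit of the field is set.  */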
5367 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5368 HOST_WIDE_INT op1val = INTVAL (op1);
5369 HOST_WIDE_INT op2val = INTVAL (op2);
5370 if (BITS_BIG_ENDIAN)
5371 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5372 else
5373 val >>= op2val;
5375 if (HOST_BITS_PER_WIDE_INT != op1val)
5377 /* First zero-extend. */
5378 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5379 /* If desired, propagate sign bit. */
5380 if (code == SIGN_EXTRACT
5381 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5382 != 0)
5383 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5386 return gen_int_mode (val, mode);
5388 break;
5390 case IF_THEN_ELSE:
5391 if (CONST_INT_P (op0))
5392 return op0 != const0_rtx ? op1 : op2;
5394 /* Convert c ? a : a into "a". */
5395 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5396 return op1;
5398 /* Convert a != b ? a : b into "a". */
5399 if (GET_CODE (op0) == NE
5400 && ! side_effects_p (op0)
5401 && ! HONOR_NANS (mode)
5402 && ! HONOR_SIGNED_ZEROS (mode)
5403 && ((rtx_equal_p (XEXP (op0, 0), op1)
5404 && rtx_equal_p (XEXP (op0, 1), op2))
5405 || (rtx_equal_p (XEXP (op0, 0), op2)
5406 && rtx_equal_p (XEXP (op0, 1), op1))))
5407 return op1;
5409 /* Convert a == b ? a : b into "b". */
5410 if (GET_CODE (op0) == EQ
5411 && ! side_effects_p (op0)
5412 && ! HONOR_NANS (mode)
5413 && ! HONOR_SIGNED_ZEROS (mode)
5414 && ((rtx_equal_p (XEXP (op0, 0), op1)
5415 && rtx_equal_p (XEXP (op0, 1), op2))
5416 || (rtx_equal_p (XEXP (op0, 0), op2)
5417 && rtx_equal_p (XEXP (op0, 1), op1))))
5418 return op2;
5420 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5422 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5423 ? GET_MODE (XEXP (op0, 1))
5424 : GET_MODE (XEXP (op0, 0)));
5425 rtx temp;
5427 /* Look for happy constants in op1 and op2. */
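          /* For example, with STORE_FLAG_VALUE == 1,
             (if_then_else (lt x y) (const_int 1) (const_int 0)) folds to
             (lt x y) in the result mode; with the two constants swapped,
             the reversed comparison (ge x y) is used instead, provided the
             comparison is reversible.  */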
5428 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5430 HOST_WIDE_INT t = INTVAL (op1);
5431 HOST_WIDE_INT f = INTVAL (op2);
5433 if (t == STORE_FLAG_VALUE && f == 0)
5434 code = GET_CODE (op0);
5435 else if (t == 0 && f == STORE_FLAG_VALUE)
5437 enum rtx_code tmp;
5438 tmp = reversed_comparison_code (op0, NULL_RTX);
5439 if (tmp == UNKNOWN)
5440 break;
5441 code = tmp;
5443 else
5444 break;
5446 return simplify_gen_relational (code, mode, cmp_mode,
5447 XEXP (op0, 0), XEXP (op0, 1));
5450 if (cmp_mode == VOIDmode)
5451 cmp_mode = op0_mode;
5452 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5453 cmp_mode, XEXP (op0, 0),
5454 XEXP (op0, 1));
5456 /* See if any simplifications were possible. */
5457 if (temp)
5459 if (CONST_INT_P (temp))
5460 return temp == const0_rtx ? op2 : op1;
5461 else if (temp)
5462 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5465 break;
5467 case VEC_MERGE:
5468 gcc_assert (GET_MODE (op0) == mode);
5469 gcc_assert (GET_MODE (op1) == mode);
5470 gcc_assert (VECTOR_MODE_P (mode));
5471 trueop2 = avoid_constant_pool_reference (op2);
5472 if (CONST_INT_P (trueop2))
5474 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5475 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5476 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5477 unsigned HOST_WIDE_INT mask;
5478 if (n_elts == HOST_BITS_PER_WIDE_INT)
5479 mask = -1;
5480 else
5481 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5483 if (!(sel & mask) && !side_effects_p (op0))
5484 return op1;
5485 if ((sel & mask) == mask && !side_effects_p (op1))
5486 return op0;
5488 rtx trueop0 = avoid_constant_pool_reference (op0);
5489 rtx trueop1 = avoid_constant_pool_reference (op1);
5490 if (GET_CODE (trueop0) == CONST_VECTOR
5491 && GET_CODE (trueop1) == CONST_VECTOR)
5493 rtvec v = rtvec_alloc (n_elts);
5494 unsigned int i;
5496 for (i = 0; i < n_elts; i++)
5497 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5498 ? CONST_VECTOR_ELT (trueop0, i)
5499 : CONST_VECTOR_ELT (trueop1, i));
5500 return gen_rtx_CONST_VECTOR (mode, v);
5503 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5504 if no element from a appears in the result. */
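      /* Illustrative instance for a two-element vector:
         (vec_merge (vec_merge a b (const_int 1)) c (const_int 2)) takes its
         only surviving inner element from b, so it becomes
         (vec_merge b c (const_int 2)).  */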
5505 if (GET_CODE (op0) == VEC_MERGE)
5507 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5508 if (CONST_INT_P (tem))
5510 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5511 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5512 return simplify_gen_ternary (code, mode, mode,
5513 XEXP (op0, 1), op1, op2);
5514 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5515 return simplify_gen_ternary (code, mode, mode,
5516 XEXP (op0, 0), op1, op2);
5519 if (GET_CODE (op1) == VEC_MERGE)
5521 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5522 if (CONST_INT_P (tem))
5524 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5525 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5526 return simplify_gen_ternary (code, mode, mode,
5527 op0, XEXP (op1, 1), op2);
5528 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5529 return simplify_gen_ternary (code, mode, mode,
5530 op0, XEXP (op1, 0), op2);
5535 if (rtx_equal_p (op0, op1)
5536 && !side_effects_p (op2) && !side_effects_p (op1))
5537 return op0;
5539 break;
5541 default:
5542 gcc_unreachable ();
5545 return 0;
5548 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5549 or CONST_VECTOR,
5550 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5552 Works by unpacking OP into a collection of 8-bit values
5553 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5554 and then repacking them again for OUTERMODE. */
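/* For example (illustrative, on a little-endian target),
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) unpacks the value
   into the byte array {0x34, 0x12}, selects the byte at offset 0 and
   repacks it as (const_int 0x34).  */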
5556 static rtx
5557 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5558 enum machine_mode innermode, unsigned int byte)
5560 /* We support up to 512-bit values (for V8DFmode). */
5561 enum {
5562 max_bitsize = 512,
5563 value_bit = 8,
5564 value_mask = (1 << value_bit) - 1
5566 unsigned char value[max_bitsize / value_bit];
5567 int value_start;
5568 int i;
5569 int elem;
5571 int num_elem;
5572 rtx * elems;
5573 int elem_bitsize;
5574 rtx result_s;
5575 rtvec result_v = NULL;
5576 enum mode_class outer_class;
5577 enum machine_mode outer_submode;
5579 /* Some ports misuse CCmode. */
5580 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5581 return op;
5583 /* We have no way to represent a complex constant at the rtl level. */
5584 if (COMPLEX_MODE_P (outermode))
5585 return NULL_RTX;
5587 /* Unpack the value. */
5589 if (GET_CODE (op) == CONST_VECTOR)
5591 num_elem = CONST_VECTOR_NUNITS (op);
5592 elems = &CONST_VECTOR_ELT (op, 0);
5593 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5595 else
5597 num_elem = 1;
5598 elems = &op;
5599 elem_bitsize = max_bitsize;
5601 /* If this asserts, it is too complicated; reducing value_bit may help. */
5602 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5603 /* I don't know how to handle endianness of sub-units. */
5604 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5606 for (elem = 0; elem < num_elem; elem++)
5608 unsigned char * vp;
5609 rtx el = elems[elem];
5611 /* Vectors are kept in target memory order. (This is probably
5612 a mistake.) */
5614 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5615 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5616 / BITS_PER_UNIT);
5617 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5618 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5619 unsigned bytele = (subword_byte % UNITS_PER_WORD
5620 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5621 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5624 switch (GET_CODE (el))
5626 case CONST_INT:
5627 for (i = 0;
5628 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5629 i += value_bit)
5630 *vp++ = INTVAL (el) >> i;
5631 /* CONST_INTs are always logically sign-extended. */
5632 for (; i < elem_bitsize; i += value_bit)
5633 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5634 break;
5636 case CONST_DOUBLE:
5637 if (GET_MODE (el) == VOIDmode)
5639 unsigned char extend = 0;
5640 /* If this triggers, someone should have generated a
5641 CONST_INT instead. */
5642 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5644 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5645 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5646 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5648 *vp++
5649 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5650 i += value_bit;
5653 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5654 extend = -1;
5655 for (; i < elem_bitsize; i += value_bit)
5656 *vp++ = extend;
5658 else
5660 long tmp[max_bitsize / 32];
5661 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5663 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5664 gcc_assert (bitsize <= elem_bitsize);
5665 gcc_assert (bitsize % value_bit == 0);
5667 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5668 GET_MODE (el));
5670 /* real_to_target produces its result in words affected by
5671 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5672 and use WORDS_BIG_ENDIAN instead; see the documentation
5673 of SUBREG in rtl.texi. */
5674 for (i = 0; i < bitsize; i += value_bit)
5676 int ibase;
5677 if (WORDS_BIG_ENDIAN)
5678 ibase = bitsize - 1 - i;
5679 else
5680 ibase = i;
5681 *vp++ = tmp[ibase / 32] >> i % 32;
5684 /* It shouldn't matter what's done here, so fill it with
5685 zero. */
5686 for (; i < elem_bitsize; i += value_bit)
5687 *vp++ = 0;
5689 break;
5691 case CONST_FIXED:
5692 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5694 for (i = 0; i < elem_bitsize; i += value_bit)
5695 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5697 else
5699 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5700 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5701 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5702 i += value_bit)
5703 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5704 >> (i - HOST_BITS_PER_WIDE_INT);
5705 for (; i < elem_bitsize; i += value_bit)
5706 *vp++ = 0;
5708 break;
5710 default:
5711 gcc_unreachable ();
5715 /* Now, pick the right byte to start with. */
5716 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5717 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5718 will already have offset 0. */
5719 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5721 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5722 - byte);
5723 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5724 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5725 byte = (subword_byte % UNITS_PER_WORD
5726 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5729 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5730 so if it's become negative it will instead be very large.) */
5731 gcc_assert (byte < GET_MODE_SIZE (innermode));
5733 /* Convert from bytes to chunks of size value_bit. */
5734 value_start = byte * (BITS_PER_UNIT / value_bit);
5736 /* Re-pack the value. */
5738 if (VECTOR_MODE_P (outermode))
5740 num_elem = GET_MODE_NUNITS (outermode);
5741 result_v = rtvec_alloc (num_elem);
5742 elems = &RTVEC_ELT (result_v, 0);
5743 outer_submode = GET_MODE_INNER (outermode);
5745 else
5747 num_elem = 1;
5748 elems = &result_s;
5749 outer_submode = outermode;
5752 outer_class = GET_MODE_CLASS (outer_submode);
5753 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5755 gcc_assert (elem_bitsize % value_bit == 0);
5756 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5758 for (elem = 0; elem < num_elem; elem++)
5760 unsigned char *vp;
5762 /* Vectors are stored in target memory order. (This is probably
5763 a mistake.) */
5765 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5766 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5767 / BITS_PER_UNIT);
5768 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5769 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5770 unsigned bytele = (subword_byte % UNITS_PER_WORD
5771 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5772 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5775 switch (outer_class)
5777 case MODE_INT:
5778 case MODE_PARTIAL_INT:
5780 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5782 for (i = 0;
5783 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5784 i += value_bit)
5785 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5786 for (; i < elem_bitsize; i += value_bit)
5787 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5788 << (i - HOST_BITS_PER_WIDE_INT);
5790 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5791 know why. */
5792 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5793 elems[elem] = gen_int_mode (lo, outer_submode);
5794 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5795 elems[elem] = immed_double_const (lo, hi, outer_submode);
5796 else
5797 return NULL_RTX;
5799 break;
5801 case MODE_FLOAT:
5802 case MODE_DECIMAL_FLOAT:
5804 REAL_VALUE_TYPE r;
5805 long tmp[max_bitsize / 32];
5807 /* real_from_target wants its input in words affected by
5808 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5809 and use WORDS_BIG_ENDIAN instead; see the documentation
5810 of SUBREG in rtl.texi. */
5811 for (i = 0; i < max_bitsize / 32; i++)
5812 tmp[i] = 0;
5813 for (i = 0; i < elem_bitsize; i += value_bit)
5815 int ibase;
5816 if (WORDS_BIG_ENDIAN)
5817 ibase = elem_bitsize - 1 - i;
5818 else
5819 ibase = i;
5820 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5823 real_from_target (&r, tmp, outer_submode);
5824 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5826 break;
5828 case MODE_FRACT:
5829 case MODE_UFRACT:
5830 case MODE_ACCUM:
5831 case MODE_UACCUM:
5833 FIXED_VALUE_TYPE f;
5834 f.data.low = 0;
5835 f.data.high = 0;
5836 f.mode = outer_submode;
5838 for (i = 0;
5839 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5840 i += value_bit)
5841 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5842 for (; i < elem_bitsize; i += value_bit)
5843 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5844 << (i - HOST_BITS_PER_WIDE_INT));
5846 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5848 break;
5850 default:
5851 gcc_unreachable ();
5854 if (VECTOR_MODE_P (outermode))
5855 return gen_rtx_CONST_VECTOR (outermode, result_v);
5856 else
5857 return result_s;
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* A little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
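
  /* For example (illustrative only): on a little-endian target,
     simplify_subreg (HImode, const_int 0x12345678, SImode, 0) goes through
     simplify_immed_subreg and folds to (const_int 0x5678).  Note that the
     caller must supply INNERMODE explicitly, since a CONST_INT itself has
     VOIDmode.  */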
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big-endian machines, this value
         should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
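
      /* Worked numbers (illustrative only): on a hypothetical big-endian
         target with UNITS_PER_WORD == 4, a paradoxical (subreg:DI (x:SI) 0)
         has difference == 4 - 8 == -4, so FINAL_OFFSET is adjusted by -4,
         recovering the negative offset that the SUBREG_BYTE == 0 convention
         hides.  */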
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out if the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
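
  /* For instance (illustrative only): on a little-endian target,
     (subreg:QI (subreg:HI (reg:SI N) 0) 0), where N is a pseudo, collapses
     in the block above to the single subreg (subreg:QI (reg:SI N) 0).  */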
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             an offset inside the original regno, so do this only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
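
  /* For example (illustrative only): (subreg:SI (reg:DI 0) 0) can become
     (reg:SI 0) here, provided simplify_subreg_regno reports that hard
     register 0 can hold SImode at that byte offset on the target.  */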
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
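
  /* For instance (illustrative only): (subreg:HI (mem:SI (reg:SI P)) 2)
     becomes a narrower memory reference, roughly
     (mem:HI (plus:SI (reg:SI P) (const_int 2))), via adjust_address_nv.  */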
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
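
  /* For example (illustrative only): with 4-byte SFmode,
     (subreg:SF (concat:SC R I) 4) selects the imaginary part and simplifies
     to I; byte offset 0 would select R instead.  */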
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
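
  /* For instance (illustrative only): on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:QI X)) 4) reads bits 32..63 of the
     zero-extension, which are all known to be zero, so it folds to
     (const_int 0).  */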
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }
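
  /* A lowpart narrowing subreg is tried as a truncation; for example
     (illustrative only), (subreg:HI (zero_extend:SI (reg:HI X)) 0) on a
     little-endian target comes back from simplify_truncation as just
     (reg:HI X).  */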
  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
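
/* Typical use (illustrative only): a caller wanting the low half of a
   DImode value X can write
     simplify_gen_subreg (SImode, x, DImode,
                          subreg_lowpart_offset (SImode, DImode));
   the call folds X when possible, otherwise builds a fresh SUBREG, and
   returns NULL_RTX when no valid subreg can be formed.  */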
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}