1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
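/* A minimal usage sketch of the convention above, splitting one signed
   HOST_WIDE_INT into such a (low, high) pair; the helper name is purely
   illustrative.  */
static inline void
split_hwi_pair_sketch (HOST_WIDE_INT x,
                       unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *high)
{
  *low = (unsigned HOST_WIDE_INT) x;	/* low word, reinterpreted unsigned */
  *high = HWI_SIGN_EXTEND (x);		/* all-ones or all-zeros sign word */
}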
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
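/* An illustrative sketch of the truncation above, assuming QImode is the
   8-bit integer mode: negating the most negative QImode value overflows,
   and gen_int_mode wraps the result back into range.  */
static rtx
neg_const_int_wrap_sketch (void)
{
  /* -(-128) is 128, which does not fit in 8 bits; the result wraps back
     to (const_int -128).  */
  return neg_const_int (QImode, GEN_INT (-128));
}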
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
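/* A worked sketch of the value recognized above for a mode of WIDTH bits
   (WIDTH at most HOST_BITS_PER_WIDE_INT); for a 32-bit mode this is
   0x80000000.  The helper name is illustrative only.  */
static inline unsigned HOST_WIDE_INT
signbit_constant_sketch (unsigned int width)
{
  return (unsigned HOST_WIDE_INT) 1 << (width - 1);
}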
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
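/* A hedged usage sketch for the routine above: requesting X + 0 in
   SImode.  Because simplify_binary_operation folds the identity first,
   the call normally hands back X itself rather than building
   (plus X (const_int 0)).  The wrapper name and mode are assumptions.  */
static rtx
add_zero_sketch (rtx x)
{
  return simplify_gen_binary (PLUS, SImode, x, const0_rtx);
}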
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
210 delegitimize_mem_from_attrs (rtx x)
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
271 newx = DECL_RTL (decl);
273 if (MEM_P (newx))
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
302 return x;
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
312 rtx tem;
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
318 return gen_rtx_fmt_e (code, mode, op);
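/* A hedged usage sketch for the routine above: negating twice in SImode.
   The second call sees (neg X) and folds straight back to X, so the
   outer NEG never materializes.  The wrapper name is illustrative.  */
static rtx
double_neg_sketch (rtx x)
{
  rtx n = simplify_gen_unary (NEG, SImode, x, SImode);
  return simplify_gen_unary (NEG, SImode, n, SImode);
}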
321 /* Likewise for ternary operations. */
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
327 rtx tem;
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
337 /* Likewise, for relational operations.
 338 CMP_MODE specifies the mode in which the comparison is done.  */
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
344 rtx tem;
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
353 /* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
354 is an expression in X that is equal to OLD_RTX. Canonicalize and
355 simplify the result.
357 If FN is null, assume FN (X', DATA) == copy_rtx (DATA). */
360 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
361 rtx (*fn) (rtx, void *), void *data)
363 enum rtx_code code = GET_CODE (x);
364 enum machine_mode mode = GET_MODE (x);
365 enum machine_mode op_mode;
366 const char *fmt;
367 rtx op0, op1, op2, newx, op;
368 rtvec vec, newvec;
369 int i, j;
 371 /* If X is OLD_RTX, return FN (X, DATA), or copy_rtx (DATA) if FN is null.  Otherwise,
372 if this is an expression, try to build a new expression, substituting
373 recursively. If we can't do anything, return our input. */
375 if (rtx_equal_p (x, old_rtx))
377 if (fn)
378 return fn (x, data);
379 else
380 return copy_rtx ((rtx) data);
383 switch (GET_RTX_CLASS (code))
385 case RTX_UNARY:
386 op0 = XEXP (x, 0);
387 op_mode = GET_MODE (op0);
388 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
389 if (op0 == XEXP (x, 0))
390 return x;
391 return simplify_gen_unary (code, mode, op0, op_mode);
393 case RTX_BIN_ARITH:
394 case RTX_COMM_ARITH:
395 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
396 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
397 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
398 return x;
399 return simplify_gen_binary (code, mode, op0, op1);
401 case RTX_COMPARE:
402 case RTX_COMM_COMPARE:
403 op0 = XEXP (x, 0);
404 op1 = XEXP (x, 1);
405 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
406 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
407 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
408 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
409 return x;
410 return simplify_gen_relational (code, mode, op_mode, op0, op1);
412 case RTX_TERNARY:
413 case RTX_BITFIELD_OPS:
414 op0 = XEXP (x, 0);
415 op_mode = GET_MODE (op0);
416 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
417 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
418 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
419 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
420 return x;
421 if (op_mode == VOIDmode)
422 op_mode = GET_MODE (op0);
423 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
425 case RTX_EXTRA:
426 if (code == SUBREG)
428 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
429 if (op0 == SUBREG_REG (x))
430 return x;
431 op0 = simplify_gen_subreg (GET_MODE (x), op0,
432 GET_MODE (SUBREG_REG (x)),
433 SUBREG_BYTE (x));
434 return op0 ? op0 : x;
436 break;
438 case RTX_OBJ:
439 if (code == MEM)
441 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
442 if (op0 == XEXP (x, 0))
443 return x;
444 return replace_equiv_address_nv (x, op0);
446 else if (code == LO_SUM)
448 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
449 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
451 /* (lo_sum (high x) x) -> x */
452 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
453 return op1;
455 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
456 return x;
457 return gen_rtx_LO_SUM (mode, op0, op1);
459 break;
461 default:
462 break;
465 newx = x;
466 fmt = GET_RTX_FORMAT (code);
467 for (i = 0; fmt[i]; i++)
468 switch (fmt[i])
470 case 'E':
471 vec = XVEC (x, i);
472 newvec = XVEC (newx, i);
473 for (j = 0; j < GET_NUM_ELEM (vec); j++)
475 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
476 old_rtx, fn, data);
477 if (op != RTVEC_ELT (vec, j))
479 if (newvec == vec)
481 newvec = shallow_copy_rtvec (vec);
482 if (x == newx)
483 newx = shallow_copy_rtx (x);
484 XVEC (newx, i) = newvec;
486 RTVEC_ELT (newvec, j) = op;
489 break;
491 case 'e':
492 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
493 if (op != XEXP (x, i))
495 if (x == newx)
496 newx = shallow_copy_rtx (x);
497 XEXP (newx, i) = op;
499 break;
501 return newx;
504 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
505 resulting RTX. Return a new RTX which is as simplified as possible. */
508 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
510 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
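/* A hedged usage sketch for the routine above, with illustrative
   parameter names: rewrite every occurrence of OLD_REG inside EXPR as
   NEW_REG and let the result canonicalize and re-simplify.  */
static rtx
replace_reg_sketch (rtx expr, rtx old_reg, rtx new_reg)
{
  return simplify_replace_rtx (expr, old_reg, new_reg);
}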
513 /* Try to simplify a unary operation CODE whose output mode is to be
514 MODE with input operand OP whose mode was originally OP_MODE.
515 Return zero if no simplification can be made. */
517 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
518 rtx op, enum machine_mode op_mode)
520 rtx trueop, tem;
522 trueop = avoid_constant_pool_reference (op);
524 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
525 if (tem)
526 return tem;
528 return simplify_unary_operation_1 (code, mode, op);
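/* A hedged constant-folding sketch for the routine above: negating
   (const_int 5) in SImode is handled entirely by
   simplify_const_unary_operation and yields (const_int -5).  */
static rtx
fold_neg_const_sketch (void)
{
  return simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);
}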
531 /* Perform some simplifications we can do even if the operands
532 aren't constant. */
533 static rtx
534 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
536 enum rtx_code reversed;
537 rtx temp;
539 switch (code)
541 case NOT:
542 /* (not (not X)) == X. */
543 if (GET_CODE (op) == NOT)
544 return XEXP (op, 0);
546 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
547 comparison is all ones. */
548 if (COMPARISON_P (op)
549 && (mode == BImode || STORE_FLAG_VALUE == -1)
550 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
551 return simplify_gen_relational (reversed, mode, VOIDmode,
552 XEXP (op, 0), XEXP (op, 1));
554 /* (not (plus X -1)) can become (neg X). */
555 if (GET_CODE (op) == PLUS
556 && XEXP (op, 1) == constm1_rtx)
557 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
559 /* Similarly, (not (neg X)) is (plus X -1). */
560 if (GET_CODE (op) == NEG)
561 return plus_constant (XEXP (op, 0), -1);
563 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
564 if (GET_CODE (op) == XOR
565 && CONST_INT_P (XEXP (op, 1))
566 && (temp = simplify_unary_operation (NOT, mode,
567 XEXP (op, 1), mode)) != 0)
568 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
570 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
571 if (GET_CODE (op) == PLUS
572 && CONST_INT_P (XEXP (op, 1))
573 && mode_signbit_p (mode, XEXP (op, 1))
574 && (temp = simplify_unary_operation (NOT, mode,
575 XEXP (op, 1), mode)) != 0)
576 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
579 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
580 operands other than 1, but that is not valid. We could do a
581 similar simplification for (not (lshiftrt C X)) where C is
582 just the sign bit, but this doesn't seem common enough to
583 bother with. */
584 if (GET_CODE (op) == ASHIFT
585 && XEXP (op, 0) == const1_rtx)
587 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
588 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
591 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
592 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
593 so we can perform the above simplification. */
595 if (STORE_FLAG_VALUE == -1
596 && GET_CODE (op) == ASHIFTRT
 597 && CONST_INT_P (XEXP (op, 1))
598 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
599 return simplify_gen_relational (GE, mode, VOIDmode,
600 XEXP (op, 0), const0_rtx);
603 if (GET_CODE (op) == SUBREG
604 && subreg_lowpart_p (op)
605 && (GET_MODE_SIZE (GET_MODE (op))
606 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
607 && GET_CODE (SUBREG_REG (op)) == ASHIFT
608 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
610 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
611 rtx x;
613 x = gen_rtx_ROTATE (inner_mode,
614 simplify_gen_unary (NOT, inner_mode, const1_rtx,
615 inner_mode),
616 XEXP (SUBREG_REG (op), 1));
617 return rtl_hooks.gen_lowpart_no_emit (mode, x);
620 /* Apply De Morgan's laws to reduce number of patterns for machines
621 with negating logical insns (and-not, nand, etc.). If result has
622 only one NOT, put it first, since that is how the patterns are
623 coded. */
625 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
627 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
628 enum machine_mode op_mode;
630 op_mode = GET_MODE (in1);
631 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
633 op_mode = GET_MODE (in2);
634 if (op_mode == VOIDmode)
635 op_mode = mode;
636 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
638 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
640 rtx tem = in2;
641 in2 = in1; in1 = tem;
644 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
645 mode, in1, in2);
647 break;
649 case NEG:
650 /* (neg (neg X)) == X. */
651 if (GET_CODE (op) == NEG)
652 return XEXP (op, 0);
654 /* (neg (plus X 1)) can become (not X). */
655 if (GET_CODE (op) == PLUS
656 && XEXP (op, 1) == const1_rtx)
657 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
659 /* Similarly, (neg (not X)) is (plus X 1). */
660 if (GET_CODE (op) == NOT)
661 return plus_constant (XEXP (op, 0), 1);
663 /* (neg (minus X Y)) can become (minus Y X). This transformation
664 isn't safe for modes with signed zeros, since if X and Y are
665 both +0, (minus Y X) is the same as (minus X Y). If the
666 rounding mode is towards +infinity (or -infinity) then the two
667 expressions will be rounded differently. */
668 if (GET_CODE (op) == MINUS
669 && !HONOR_SIGNED_ZEROS (mode)
670 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
671 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
673 if (GET_CODE (op) == PLUS
674 && !HONOR_SIGNED_ZEROS (mode)
675 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
677 /* (neg (plus A C)) is simplified to (minus -C A). */
678 if (CONST_INT_P (XEXP (op, 1))
679 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
681 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
682 if (temp)
683 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
686 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
687 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
688 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
691 /* (neg (mult A B)) becomes (mult (neg A) B).
692 This works even for floating-point values. */
693 if (GET_CODE (op) == MULT
694 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
696 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
697 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
700 /* NEG commutes with ASHIFT since it is multiplication. Only do
701 this if we can then eliminate the NEG (e.g., if the operand
702 is a constant). */
703 if (GET_CODE (op) == ASHIFT)
705 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
706 if (temp)
707 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
710 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
711 C is equal to the width of MODE minus 1. */
712 if (GET_CODE (op) == ASHIFTRT
713 && CONST_INT_P (XEXP (op, 1))
714 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
715 return simplify_gen_binary (LSHIFTRT, mode,
716 XEXP (op, 0), XEXP (op, 1));
718 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
719 C is equal to the width of MODE minus 1. */
720 if (GET_CODE (op) == LSHIFTRT
721 && CONST_INT_P (XEXP (op, 1))
722 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
723 return simplify_gen_binary (ASHIFTRT, mode,
724 XEXP (op, 0), XEXP (op, 1));
726 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
727 if (GET_CODE (op) == XOR
728 && XEXP (op, 1) == const1_rtx
729 && nonzero_bits (XEXP (op, 0), mode) == 1)
730 return plus_constant (XEXP (op, 0), -1);
732 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
733 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
734 if (GET_CODE (op) == LT
735 && XEXP (op, 1) == const0_rtx
736 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
738 enum machine_mode inner = GET_MODE (XEXP (op, 0));
739 int isize = GET_MODE_BITSIZE (inner);
740 if (STORE_FLAG_VALUE == 1)
742 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
743 GEN_INT (isize - 1));
744 if (mode == inner)
745 return temp;
746 if (GET_MODE_BITSIZE (mode) > isize)
747 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
748 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
750 else if (STORE_FLAG_VALUE == -1)
752 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
753 GEN_INT (isize - 1));
754 if (mode == inner)
755 return temp;
756 if (GET_MODE_BITSIZE (mode) > isize)
757 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
758 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
761 break;
763 case TRUNCATE:
764 /* We can't handle truncation to a partial integer mode here
765 because we don't know the real bitsize of the partial
766 integer mode. */
767 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
768 break;
770 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
771 if ((GET_CODE (op) == SIGN_EXTEND
772 || GET_CODE (op) == ZERO_EXTEND)
773 && GET_MODE (XEXP (op, 0)) == mode)
774 return XEXP (op, 0);
776 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
777 (OP:SI foo:SI) if OP is NEG or ABS. */
778 if ((GET_CODE (op) == ABS
779 || GET_CODE (op) == NEG)
780 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
781 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
782 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
783 return simplify_gen_unary (GET_CODE (op), mode,
784 XEXP (XEXP (op, 0), 0), mode);
786 /* (truncate:A (subreg:B (truncate:C X) 0)) is
787 (truncate:A X). */
788 if (GET_CODE (op) == SUBREG
789 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
790 && subreg_lowpart_p (op))
791 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
792 GET_MODE (XEXP (SUBREG_REG (op), 0)));
794 /* If we know that the value is already truncated, we can
795 replace the TRUNCATE with a SUBREG. Note that this is also
796 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 797 modes; we just have to apply a different definition for
798 truncation. But don't do this for an (LSHIFTRT (MULT ...))
799 since this will cause problems with the umulXi3_highpart
800 patterns. */
801 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
802 GET_MODE_BITSIZE (GET_MODE (op)))
803 ? (num_sign_bit_copies (op, GET_MODE (op))
804 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
805 - GET_MODE_BITSIZE (mode)))
806 : truncated_to_mode (mode, op))
807 && ! (GET_CODE (op) == LSHIFTRT
808 && GET_CODE (XEXP (op, 0)) == MULT))
809 return rtl_hooks.gen_lowpart_no_emit (mode, op);
811 /* A truncate of a comparison can be replaced with a subreg if
812 STORE_FLAG_VALUE permits. This is like the previous test,
813 but it works even if the comparison is done in a mode larger
814 than HOST_BITS_PER_WIDE_INT. */
815 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
816 && COMPARISON_P (op)
817 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
818 return rtl_hooks.gen_lowpart_no_emit (mode, op);
819 break;
821 case FLOAT_TRUNCATE:
822 if (DECIMAL_FLOAT_MODE_P (mode))
823 break;
825 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
826 if (GET_CODE (op) == FLOAT_EXTEND
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
830 /* (float_truncate:SF (float_truncate:DF foo:XF))
831 = (float_truncate:SF foo:XF).
832 This may eliminate double rounding, so it is unsafe.
834 (float_truncate:SF (float_extend:XF foo:DF))
835 = (float_truncate:SF foo:DF).
837 (float_truncate:DF (float_extend:XF foo:SF))
 838 = (float_extend:DF foo:SF). */
839 if ((GET_CODE (op) == FLOAT_TRUNCATE
840 && flag_unsafe_math_optimizations)
841 || GET_CODE (op) == FLOAT_EXTEND)
842 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
843 0)))
844 > GET_MODE_SIZE (mode)
845 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
846 mode,
847 XEXP (op, 0), mode);
849 /* (float_truncate (float x)) is (float x) */
850 if (GET_CODE (op) == FLOAT
851 && (flag_unsafe_math_optimizations
852 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
853 && ((unsigned)significand_size (GET_MODE (op))
854 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
855 - num_sign_bit_copies (XEXP (op, 0),
856 GET_MODE (XEXP (op, 0))))))))
857 return simplify_gen_unary (FLOAT, mode,
858 XEXP (op, 0),
859 GET_MODE (XEXP (op, 0)));
861 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
862 (OP:SF foo:SF) if OP is NEG or ABS. */
863 if ((GET_CODE (op) == ABS
864 || GET_CODE (op) == NEG)
865 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
866 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
867 return simplify_gen_unary (GET_CODE (op), mode,
868 XEXP (XEXP (op, 0), 0), mode);
870 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
871 is (float_truncate:SF x). */
872 if (GET_CODE (op) == SUBREG
873 && subreg_lowpart_p (op)
874 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
875 return SUBREG_REG (op);
876 break;
878 case FLOAT_EXTEND:
879 if (DECIMAL_FLOAT_MODE_P (mode))
880 break;
882 /* (float_extend (float_extend x)) is (float_extend x)
884 (float_extend (float x)) is (float x) assuming that double
 885 rounding can't happen.  */
887 if (GET_CODE (op) == FLOAT_EXTEND
888 || (GET_CODE (op) == FLOAT
889 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
890 && ((unsigned)significand_size (GET_MODE (op))
891 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
892 - num_sign_bit_copies (XEXP (op, 0),
893 GET_MODE (XEXP (op, 0)))))))
894 return simplify_gen_unary (GET_CODE (op), mode,
895 XEXP (op, 0),
896 GET_MODE (XEXP (op, 0)));
898 break;
900 case ABS:
901 /* (abs (neg <foo>)) -> (abs <foo>) */
902 if (GET_CODE (op) == NEG)
903 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
904 GET_MODE (XEXP (op, 0)));
906 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
907 do nothing. */
908 if (GET_MODE (op) == VOIDmode)
909 break;
911 /* If operand is something known to be positive, ignore the ABS. */
912 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
913 || ((GET_MODE_BITSIZE (GET_MODE (op))
914 <= HOST_BITS_PER_WIDE_INT)
915 && ((nonzero_bits (op, GET_MODE (op))
916 & ((HOST_WIDE_INT) 1
917 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
918 == 0)))
919 return op;
921 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
922 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
923 return gen_rtx_NEG (mode, op);
925 break;
927 case FFS:
928 /* (ffs (*_extend <X>)) = (ffs <X>) */
929 if (GET_CODE (op) == SIGN_EXTEND
930 || GET_CODE (op) == ZERO_EXTEND)
931 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
932 GET_MODE (XEXP (op, 0)));
933 break;
935 case POPCOUNT:
936 switch (GET_CODE (op))
938 case BSWAP:
939 case ZERO_EXTEND:
940 /* (popcount (zero_extend <X>)) = (popcount <X>) */
941 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
942 GET_MODE (XEXP (op, 0)));
944 case ROTATE:
945 case ROTATERT:
946 /* Rotations don't affect popcount. */
947 if (!side_effects_p (XEXP (op, 1)))
948 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950 break;
952 default:
953 break;
955 break;
957 case PARITY:
958 switch (GET_CODE (op))
960 case NOT:
961 case BSWAP:
962 case ZERO_EXTEND:
963 case SIGN_EXTEND:
964 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
965 GET_MODE (XEXP (op, 0)));
967 case ROTATE:
968 case ROTATERT:
969 /* Rotations don't affect parity. */
970 if (!side_effects_p (XEXP (op, 1)))
971 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
972 GET_MODE (XEXP (op, 0)));
973 break;
975 default:
976 break;
978 break;
980 case BSWAP:
981 /* (bswap (bswap x)) -> x. */
982 if (GET_CODE (op) == BSWAP)
983 return XEXP (op, 0);
984 break;
986 case FLOAT:
987 /* (float (sign_extend <X>)) = (float <X>). */
988 if (GET_CODE (op) == SIGN_EXTEND)
989 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
990 GET_MODE (XEXP (op, 0)));
991 break;
993 case SIGN_EXTEND:
994 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
995 becomes just the MINUS if its mode is MODE. This allows
996 folding switch statements on machines using casesi (such as
997 the VAX). */
998 if (GET_CODE (op) == TRUNCATE
999 && GET_MODE (XEXP (op, 0)) == mode
1000 && GET_CODE (XEXP (op, 0)) == MINUS
1001 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1002 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1003 return XEXP (op, 0);
1005 /* Check for a sign extension of a subreg of a promoted
1006 variable, where the promotion is sign-extended, and the
1007 target mode is the same as the variable's promotion. */
1008 if (GET_CODE (op) == SUBREG
1009 && SUBREG_PROMOTED_VAR_P (op)
1010 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1011 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1012 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1014 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1015 /* As we do not know which address space the pointer is referring to,
1016 we can do this only if the target does not support different pointer
1017 or address modes depending on the address space. */
1018 if (target_default_pointer_address_modes_p ()
1019 && ! POINTERS_EXTEND_UNSIGNED
1020 && mode == Pmode && GET_MODE (op) == ptr_mode
1021 && (CONSTANT_P (op)
1022 || (GET_CODE (op) == SUBREG
1023 && REG_P (SUBREG_REG (op))
1024 && REG_POINTER (SUBREG_REG (op))
1025 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1026 return convert_memory_address (Pmode, op);
1027 #endif
1028 break;
1030 case ZERO_EXTEND:
1031 /* Check for a zero extension of a subreg of a promoted
1032 variable, where the promotion is zero-extended, and the
1033 target mode is the same as the variable's promotion. */
1034 if (GET_CODE (op) == SUBREG
1035 && SUBREG_PROMOTED_VAR_P (op)
1036 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1037 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1038 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1040 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1041 /* As we do not know which address space the pointer is referring to,
1042 we can do this only if the target does not support different pointer
1043 or address modes depending on the address space. */
1044 if (target_default_pointer_address_modes_p ()
1045 && POINTERS_EXTEND_UNSIGNED > 0
1046 && mode == Pmode && GET_MODE (op) == ptr_mode
1047 && (CONSTANT_P (op)
1048 || (GET_CODE (op) == SUBREG
1049 && REG_P (SUBREG_REG (op))
1050 && REG_POINTER (SUBREG_REG (op))
1051 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1052 return convert_memory_address (Pmode, op);
1053 #endif
1054 break;
1056 default:
1057 break;
1060 return 0;
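/* A hedged sketch of one NOT identity handled above: for plain register
   operands A and B in SImode, (not (ior A B)) is rewritten by
   De Morgan's law into (and (not A) (not B)).  */
static rtx
demorgan_sketch (rtx a, rtx b)
{
  rtx disj = simplify_gen_binary (IOR, SImode, a, b);
  return simplify_gen_unary (NOT, SImode, disj, SImode);
}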
1063 /* Try to compute the value of a unary operation CODE whose output mode is to
1064 be MODE with input operand OP whose mode was originally OP_MODE.
1065 Return zero if the value cannot be computed. */
1067 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1068 rtx op, enum machine_mode op_mode)
1070 unsigned int width = GET_MODE_BITSIZE (mode);
1072 if (code == VEC_DUPLICATE)
1074 gcc_assert (VECTOR_MODE_P (mode));
1075 if (GET_MODE (op) != VOIDmode)
1077 if (!VECTOR_MODE_P (GET_MODE (op)))
1078 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1079 else
1080 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1081 (GET_MODE (op)));
1083 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1084 || GET_CODE (op) == CONST_VECTOR)
1086 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1087 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1088 rtvec v = rtvec_alloc (n_elts);
1089 unsigned int i;
1091 if (GET_CODE (op) != CONST_VECTOR)
1092 for (i = 0; i < n_elts; i++)
1093 RTVEC_ELT (v, i) = op;
1094 else
1096 enum machine_mode inmode = GET_MODE (op);
1097 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1098 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1100 gcc_assert (in_n_elts < n_elts);
1101 gcc_assert ((n_elts % in_n_elts) == 0);
1102 for (i = 0; i < n_elts; i++)
1103 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1105 return gen_rtx_CONST_VECTOR (mode, v);
1109 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1111 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1112 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1113 enum machine_mode opmode = GET_MODE (op);
1114 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1115 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1116 rtvec v = rtvec_alloc (n_elts);
1117 unsigned int i;
1119 gcc_assert (op_n_elts == n_elts);
1120 for (i = 0; i < n_elts; i++)
1122 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1123 CONST_VECTOR_ELT (op, i),
1124 GET_MODE_INNER (opmode));
1125 if (!x)
1126 return 0;
1127 RTVEC_ELT (v, i) = x;
1129 return gen_rtx_CONST_VECTOR (mode, v);
1132 /* The order of these tests is critical so that, for example, we don't
1133 check the wrong mode (input vs. output) for a conversion operation,
1134 such as FIX. At some point, this should be simplified. */
1136 if (code == FLOAT && GET_MODE (op) == VOIDmode
1137 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1139 HOST_WIDE_INT hv, lv;
1140 REAL_VALUE_TYPE d;
1142 if (CONST_INT_P (op))
1143 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1144 else
1145 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1147 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1148 d = real_value_truncate (mode, d);
1149 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1151 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1152 && (GET_CODE (op) == CONST_DOUBLE
1153 || CONST_INT_P (op)))
1155 HOST_WIDE_INT hv, lv;
1156 REAL_VALUE_TYPE d;
1158 if (CONST_INT_P (op))
1159 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1160 else
1161 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1163 if (op_mode == VOIDmode)
1165 /* We don't know how to interpret negative-looking numbers in
1166 this case, so don't try to fold those. */
1167 if (hv < 0)
1168 return 0;
 1170 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
 1171 ;
1172 else
1173 hv = 0, lv &= GET_MODE_MASK (op_mode);
1175 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1176 d = real_value_truncate (mode, d);
1177 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1180 if (CONST_INT_P (op)
1181 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1183 HOST_WIDE_INT arg0 = INTVAL (op);
1184 HOST_WIDE_INT val;
1186 switch (code)
1188 case NOT:
1189 val = ~ arg0;
1190 break;
1192 case NEG:
1193 val = - arg0;
1194 break;
1196 case ABS:
1197 val = (arg0 >= 0 ? arg0 : - arg0);
1198 break;
1200 case FFS:
1201 /* Don't use ffs here. Instead, get low order bit and then its
1202 number. If arg0 is zero, this will return 0, as desired. */
1203 arg0 &= GET_MODE_MASK (mode);
1204 val = exact_log2 (arg0 & (- arg0)) + 1;
1205 break;
1207 case CLZ:
1208 arg0 &= GET_MODE_MASK (mode);
1209 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1211 else
1212 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1213 break;
1215 case CTZ:
1216 arg0 &= GET_MODE_MASK (mode);
1217 if (arg0 == 0)
1219 /* Even if the value at zero is undefined, we have to come
1220 up with some replacement. Seems good enough. */
1221 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1222 val = GET_MODE_BITSIZE (mode);
1224 else
1225 val = exact_log2 (arg0 & -arg0);
1226 break;
1228 case POPCOUNT:
1229 arg0 &= GET_MODE_MASK (mode);
1230 val = 0;
1231 while (arg0)
1232 val++, arg0 &= arg0 - 1;
1233 break;
1235 case PARITY:
1236 arg0 &= GET_MODE_MASK (mode);
1237 val = 0;
1238 while (arg0)
1239 val++, arg0 &= arg0 - 1;
1240 val &= 1;
1241 break;
1243 case BSWAP:
1245 unsigned int s;
1247 val = 0;
1248 for (s = 0; s < width; s += 8)
1250 unsigned int d = width - s - 8;
1251 unsigned HOST_WIDE_INT byte;
1252 byte = (arg0 >> s) & 0xff;
1253 val |= byte << d;
1256 break;
1258 case TRUNCATE:
1259 val = arg0;
1260 break;
1262 case ZERO_EXTEND:
1263 /* When zero-extending a CONST_INT, we need to know its
1264 original mode. */
1265 gcc_assert (op_mode != VOIDmode);
1266 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1268 /* If we were really extending the mode,
1269 we would have to distinguish between zero-extension
1270 and sign-extension. */
1271 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1272 val = arg0;
1274 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1275 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1276 else
1277 return 0;
1278 break;
1280 case SIGN_EXTEND:
1281 if (op_mode == VOIDmode)
1282 op_mode = mode;
1283 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1285 /* If we were really extending the mode,
1286 we would have to distinguish between zero-extension
1287 and sign-extension. */
1288 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1289 val = arg0;
1291 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 1294 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1295 if (val
1296 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1297 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1299 else
1300 return 0;
1301 break;
1303 case SQRT:
1304 case FLOAT_EXTEND:
1305 case FLOAT_TRUNCATE:
1306 case SS_TRUNCATE:
1307 case US_TRUNCATE:
1308 case SS_NEG:
1309 case US_NEG:
1310 case SS_ABS:
1311 return 0;
1313 default:
1314 gcc_unreachable ();
1317 return gen_int_mode (val, mode);
1320 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1321 for a DImode operation on a CONST_INT. */
1322 else if (GET_MODE (op) == VOIDmode
1323 && width <= HOST_BITS_PER_WIDE_INT * 2
1324 && (GET_CODE (op) == CONST_DOUBLE
1325 || CONST_INT_P (op)))
1327 unsigned HOST_WIDE_INT l1, lv;
1328 HOST_WIDE_INT h1, hv;
1330 if (GET_CODE (op) == CONST_DOUBLE)
1331 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1332 else
1333 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1335 switch (code)
1337 case NOT:
1338 lv = ~ l1;
1339 hv = ~ h1;
1340 break;
1342 case NEG:
1343 neg_double (l1, h1, &lv, &hv);
1344 break;
1346 case ABS:
1347 if (h1 < 0)
1348 neg_double (l1, h1, &lv, &hv);
1349 else
1350 lv = l1, hv = h1;
1351 break;
1353 case FFS:
1354 hv = 0;
1355 if (l1 == 0)
1357 if (h1 == 0)
1358 lv = 0;
1359 else
1360 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1362 else
1363 lv = exact_log2 (l1 & -l1) + 1;
1364 break;
1366 case CLZ:
1367 hv = 0;
1368 if (h1 != 0)
1369 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1370 - HOST_BITS_PER_WIDE_INT;
1371 else if (l1 != 0)
1372 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1373 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1374 lv = GET_MODE_BITSIZE (mode);
1375 break;
1377 case CTZ:
1378 hv = 0;
1379 if (l1 != 0)
1380 lv = exact_log2 (l1 & -l1);
1381 else if (h1 != 0)
1382 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1383 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1384 lv = GET_MODE_BITSIZE (mode);
1385 break;
1387 case POPCOUNT:
1388 hv = 0;
1389 lv = 0;
1390 while (l1)
1391 lv++, l1 &= l1 - 1;
1392 while (h1)
1393 lv++, h1 &= h1 - 1;
1394 break;
1396 case PARITY:
1397 hv = 0;
1398 lv = 0;
1399 while (l1)
1400 lv++, l1 &= l1 - 1;
1401 while (h1)
1402 lv++, h1 &= h1 - 1;
1403 lv &= 1;
1404 break;
1406 case BSWAP:
1408 unsigned int s;
1410 hv = 0;
1411 lv = 0;
1412 for (s = 0; s < width; s += 8)
1414 unsigned int d = width - s - 8;
1415 unsigned HOST_WIDE_INT byte;
1417 if (s < HOST_BITS_PER_WIDE_INT)
1418 byte = (l1 >> s) & 0xff;
1419 else
1420 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1422 if (d < HOST_BITS_PER_WIDE_INT)
1423 lv |= byte << d;
1424 else
1425 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1428 break;
1430 case TRUNCATE:
1431 /* This is just a change-of-mode, so do nothing. */
1432 lv = l1, hv = h1;
1433 break;
1435 case ZERO_EXTEND:
1436 gcc_assert (op_mode != VOIDmode);
1438 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1439 return 0;
1441 hv = 0;
1442 lv = l1 & GET_MODE_MASK (op_mode);
1443 break;
1445 case SIGN_EXTEND:
1446 if (op_mode == VOIDmode
1447 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1448 return 0;
1449 else
1451 lv = l1 & GET_MODE_MASK (op_mode);
1452 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1453 && (lv & ((HOST_WIDE_INT) 1
1454 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1455 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1457 hv = HWI_SIGN_EXTEND (lv);
1459 break;
1461 case SQRT:
1462 return 0;
1464 default:
1465 return 0;
1468 return immed_double_const (lv, hv, mode);
1471 else if (GET_CODE (op) == CONST_DOUBLE
1472 && SCALAR_FLOAT_MODE_P (mode))
1474 REAL_VALUE_TYPE d, t;
1475 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1477 switch (code)
1479 case SQRT:
1480 if (HONOR_SNANS (mode) && real_isnan (&d))
1481 return 0;
1482 real_sqrt (&t, mode, &d);
1483 d = t;
1484 break;
1485 case ABS:
1486 d = REAL_VALUE_ABS (d);
1487 break;
1488 case NEG:
1489 d = REAL_VALUE_NEGATE (d);
1490 break;
1491 case FLOAT_TRUNCATE:
1492 d = real_value_truncate (mode, d);
1493 break;
1494 case FLOAT_EXTEND:
1495 /* All this does is change the mode. */
1496 break;
1497 case FIX:
1498 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1499 break;
1500 case NOT:
1502 long tmp[4];
1503 int i;
1505 real_to_target (tmp, &d, GET_MODE (op));
1506 for (i = 0; i < 4; i++)
1507 tmp[i] = ~tmp[i];
1508 real_from_target (&d, tmp, mode);
1509 break;
1511 default:
1512 gcc_unreachable ();
1514 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1517 else if (GET_CODE (op) == CONST_DOUBLE
1518 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1519 && GET_MODE_CLASS (mode) == MODE_INT
1520 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1522 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1523 operators are intentionally left unspecified (to ease implementation
1524 by target backends), for consistency, this routine implements the
1525 same semantics for constant folding as used by the middle-end. */
1527 /* This was formerly used only for non-IEEE float.
1528 eggert@twinsun.com says it is safe for IEEE also. */
1529 HOST_WIDE_INT xh, xl, th, tl;
1530 REAL_VALUE_TYPE x, t;
1531 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1532 switch (code)
1534 case FIX:
1535 if (REAL_VALUE_ISNAN (x))
1536 return const0_rtx;
1538 /* Test against the signed upper bound. */
1539 if (width > HOST_BITS_PER_WIDE_INT)
1541 th = ((unsigned HOST_WIDE_INT) 1
1542 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1543 tl = -1;
1545 else
1547 th = 0;
1548 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1550 real_from_integer (&t, VOIDmode, tl, th, 0);
1551 if (REAL_VALUES_LESS (t, x))
1553 xh = th;
1554 xl = tl;
1555 break;
1558 /* Test against the signed lower bound. */
1559 if (width > HOST_BITS_PER_WIDE_INT)
1561 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1562 tl = 0;
1564 else
1566 th = -1;
1567 tl = (HOST_WIDE_INT) -1 << (width - 1);
1569 real_from_integer (&t, VOIDmode, tl, th, 0);
1570 if (REAL_VALUES_LESS (x, t))
1572 xh = th;
1573 xl = tl;
1574 break;
1576 REAL_VALUE_TO_INT (&xl, &xh, x);
1577 break;
1579 case UNSIGNED_FIX:
1580 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1581 return const0_rtx;
1583 /* Test against the unsigned upper bound. */
1584 if (width == 2*HOST_BITS_PER_WIDE_INT)
1586 th = -1;
1587 tl = -1;
1589 else if (width >= HOST_BITS_PER_WIDE_INT)
1591 th = ((unsigned HOST_WIDE_INT) 1
1592 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1593 tl = -1;
1595 else
1597 th = 0;
1598 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1600 real_from_integer (&t, VOIDmode, tl, th, 1);
1601 if (REAL_VALUES_LESS (t, x))
1603 xh = th;
1604 xl = tl;
1605 break;
1608 REAL_VALUE_TO_INT (&xl, &xh, x);
1609 break;
1611 default:
1612 gcc_unreachable ();
1614 return immed_double_const (xl, xh, mode);
1617 return NULL_RTX;
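/* A hedged constant-folding sketch for the routine above:
   (zero_extend:SI (const_int -1)) with QImode as the original operand
   mode masks down to the low 8 bits and yields (const_int 255).  */
static rtx
fold_zero_extend_sketch (void)
{
  return simplify_const_unary_operation (ZERO_EXTEND, SImode,
                                         constm1_rtx, QImode);
}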
1620 /* Subroutine of simplify_binary_operation to simplify a commutative,
1621 associative binary operation CODE with result mode MODE, operating
1622 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1623 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1624 canonicalization is possible. */
1626 static rtx
1627 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1628 rtx op0, rtx op1)
1630 rtx tem;
1632 /* Linearize the operator to the left. */
1633 if (GET_CODE (op1) == code)
1635 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1636 if (GET_CODE (op0) == code)
1638 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1639 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1642 /* "a op (b op c)" becomes "(b op c) op a". */
1643 if (! swap_commutative_operands_p (op1, op0))
1644 return simplify_gen_binary (code, mode, op1, op0);
1646 tem = op0;
1647 op0 = op1;
1648 op1 = tem;
1651 if (GET_CODE (op0) == code)
1653 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1654 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1656 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1657 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1660 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1661 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1662 if (tem != 0)
1663 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1665 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1666 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1667 if (tem != 0)
1668 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1671 return 0;
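/* A hedged sketch of the linearization above, assuming four pseudo
   registers A..D in SImode: the nested form (plus (plus a b) (plus c d))
   comes back roughly as (plus (plus (plus a b) c) d).  */
static rtx
reassociate_sketch (rtx a, rtx b, rtx c, rtx d)
{
  rtx ab = gen_rtx_PLUS (SImode, a, b);
  rtx cd = gen_rtx_PLUS (SImode, c, d);
  return simplify_associative_operation (PLUS, SImode, ab, cd);
}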
1675 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1676 and OP1. Return 0 if no simplification is possible.
1678 Don't use this for relational operations such as EQ or LT.
1679 Use simplify_relational_operation instead. */
1681 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1682 rtx op0, rtx op1)
1684 rtx trueop0, trueop1;
1685 rtx tem;
1687 /* Relational operations don't work here. We must know the mode
1688 of the operands in order to do the comparison correctly.
1689 Assuming a full word can give incorrect results.
1690 Consider comparing 128 with -128 in QImode. */
1691 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1692 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1694 /* Make sure the constant is second. */
1695 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1696 && swap_commutative_operands_p (op0, op1))
1698 tem = op0, op0 = op1, op1 = tem;
1701 trueop0 = avoid_constant_pool_reference (op0);
1702 trueop1 = avoid_constant_pool_reference (op1);
1704 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1705 if (tem)
1706 return tem;
1707 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
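/* A hedged constant-folding sketch for the routine above:
   (mult:SI (const_int 6) (const_int 7)) normally folds to
   (const_int 42) via simplify_const_binary_operation.  */
static rtx
fold_mult_sketch (void)
{
  return simplify_binary_operation (MULT, SImode, GEN_INT (6), GEN_INT (7));
}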
1710 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1711 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1712 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1713 actual constants. */
1715 static rtx
1716 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1717 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1719 rtx tem, reversed, opleft, opright;
1720 HOST_WIDE_INT val;
1721 unsigned int width = GET_MODE_BITSIZE (mode);
1723 /* Even if we can't compute a constant result,
1724 there are some cases worth simplifying. */
1726 switch (code)
1728 case PLUS:
1729 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1730 when x is NaN, infinite, or finite and nonzero. They aren't
1731 when x is -0 and the rounding mode is not towards -infinity,
1732 since (-0) + 0 is then 0. */
1733 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1734 return op0;
1736 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1737 transformations are safe even for IEEE. */
1738 if (GET_CODE (op0) == NEG)
1739 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1740 else if (GET_CODE (op1) == NEG)
1741 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1743 /* (~a) + 1 -> -a */
1744 if (INTEGRAL_MODE_P (mode)
1745 && GET_CODE (op0) == NOT
1746 && trueop1 == const1_rtx)
1747 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1749 /* Handle both-operands-constant cases. We can only add
1750 CONST_INTs to constants since the sum of relocatable symbols
1751 can't be handled by most assemblers. Don't add CONST_INT
1752 to CONST_INT since overflow won't be computed properly if wider
1753 than HOST_BITS_PER_WIDE_INT. */
1755 if ((GET_CODE (op0) == CONST
1756 || GET_CODE (op0) == SYMBOL_REF
1757 || GET_CODE (op0) == LABEL_REF)
1758 && CONST_INT_P (op1))
1759 return plus_constant (op0, INTVAL (op1));
1760 else if ((GET_CODE (op1) == CONST
1761 || GET_CODE (op1) == SYMBOL_REF
1762 || GET_CODE (op1) == LABEL_REF)
1763 && CONST_INT_P (op0))
1764 return plus_constant (op1, INTVAL (op0));
1766 /* See if this is something like X * C - X or vice versa or
1767 if the multiplication is written as a shift. If so, we can
1768 distribute and make a new multiply, shift, or maybe just
1769 have X (if C is 2 in the example above). But don't make
1770 something more expensive than we had before. */
1772 if (SCALAR_INT_MODE_P (mode))
1774 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1775 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1776 rtx lhs = op0, rhs = op1;
1778 if (GET_CODE (lhs) == NEG)
1780 coeff0l = -1;
1781 coeff0h = -1;
1782 lhs = XEXP (lhs, 0);
1784 else if (GET_CODE (lhs) == MULT
1785 && CONST_INT_P (XEXP (lhs, 1)))
1787 coeff0l = INTVAL (XEXP (lhs, 1));
1788 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1789 lhs = XEXP (lhs, 0);
1791 else if (GET_CODE (lhs) == ASHIFT
1792 && CONST_INT_P (XEXP (lhs, 1))
1793 && INTVAL (XEXP (lhs, 1)) >= 0
1794 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1796 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1797 coeff0h = 0;
1798 lhs = XEXP (lhs, 0);
1801 if (GET_CODE (rhs) == NEG)
1803 coeff1l = -1;
1804 coeff1h = -1;
1805 rhs = XEXP (rhs, 0);
1807 else if (GET_CODE (rhs) == MULT
1808 && CONST_INT_P (XEXP (rhs, 1)))
1810 coeff1l = INTVAL (XEXP (rhs, 1));
1811 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1812 rhs = XEXP (rhs, 0);
1814 else if (GET_CODE (rhs) == ASHIFT
1815 && CONST_INT_P (XEXP (rhs, 1))
1816 && INTVAL (XEXP (rhs, 1)) >= 0
1817 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1819 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1820 coeff1h = 0;
1821 rhs = XEXP (rhs, 0);
1824 if (rtx_equal_p (lhs, rhs))
1826 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1827 rtx coeff;
1828 unsigned HOST_WIDE_INT l;
1829 HOST_WIDE_INT h;
1830 bool speed = optimize_function_for_speed_p (cfun);
1832 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1833 coeff = immed_double_const (l, h, mode);
1835 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1836 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1837 ? tem : 0;
1841 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1842 if ((CONST_INT_P (op1)
1843 || GET_CODE (op1) == CONST_DOUBLE)
1844 && GET_CODE (op0) == XOR
1845 && (CONST_INT_P (XEXP (op0, 1))
1846 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1847 && mode_signbit_p (mode, op1))
1848 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1849 simplify_gen_binary (XOR, mode, op1,
1850 XEXP (op0, 1)));
1852 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1853 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1854 && GET_CODE (op0) == MULT
1855 && GET_CODE (XEXP (op0, 0)) == NEG)
1857 rtx in1, in2;
1859 in1 = XEXP (XEXP (op0, 0), 0);
1860 in2 = XEXP (op0, 1);
1861 return simplify_gen_binary (MINUS, mode, op1,
1862 simplify_gen_binary (MULT, mode,
1863 in1, in2));
1866 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1867 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1868 is 1. */
1869 if (COMPARISON_P (op0)
1870 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1871 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1872 && (reversed = reversed_comparison (op0, mode)))
1873 return
1874 simplify_gen_unary (NEG, mode, reversed, mode);
1876 /* If one of the operands is a PLUS or a MINUS, see if we can
1877 simplify this by the associative law.
1878 Don't use the associative law for floating point.
1879 The inaccuracy makes it nonassociative,
1880 and subtle programs can break if operations are associated. */
1882 if (INTEGRAL_MODE_P (mode)
1883 && (plus_minus_operand_p (op0)
1884 || plus_minus_operand_p (op1))
1885 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1886 return tem;
1888 /* Reassociate floating point addition only when the user
1889 specifies associative math operations. */
1890 if (FLOAT_MODE_P (mode)
1891 && flag_associative_math)
1893 tem = simplify_associative_operation (code, mode, op0, op1);
1894 if (tem)
1895 return tem;
1897 break;
1899 case COMPARE:
1900 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1901 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1902 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1903 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1905 rtx xop00 = XEXP (op0, 0);
1906 rtx xop10 = XEXP (op1, 0);
1908 #ifdef HAVE_cc0
1909 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1910 #else
1911 if (REG_P (xop00) && REG_P (xop10)
1912 && GET_MODE (xop00) == GET_MODE (xop10)
1913 && REGNO (xop00) == REGNO (xop10)
1914 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1915 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1916 #endif
1917 return xop00;
1919 break;
1921 case MINUS:
1922 /* We can't assume x-x is 0 even with non-IEEE floating point,
1923 but since it is zero except in very strange circumstances, we
1924 will treat it as zero with -ffinite-math-only. */
1925 if (rtx_equal_p (trueop0, trueop1)
1926 && ! side_effects_p (op0)
1927 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1928 return CONST0_RTX (mode);
1930 /* Change subtraction from zero into negation. (0 - x) is the
1931 same as -x when x is NaN, infinite, or finite and nonzero.
1932 But if the mode has signed zeros, and does not round towards
1933 -infinity, then 0 - 0 is 0, not -0. */
1934 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1935 return simplify_gen_unary (NEG, mode, op1, mode);
1937 /* (-1 - a) is ~a. */
1938 if (trueop0 == constm1_rtx)
1939 return simplify_gen_unary (NOT, mode, op1, mode);
1941 /* Subtracting 0 has no effect unless the mode has signed zeros
1942 and supports rounding towards -infinity. In such a case,
1943 0 - 0 is -0. */
1944 if (!(HONOR_SIGNED_ZEROS (mode)
1945 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1946 && trueop1 == CONST0_RTX (mode))
1947 return op0;
1949 /* See if this is something like X * C - X or vice versa or
1950 if the multiplication is written as a shift. If so, we can
1951 distribute and make a new multiply, shift, or maybe just
1952 have X (if C is 2 in the example above). But don't make
1953 something more expensive than we had before. */
1955 if (SCALAR_INT_MODE_P (mode))
1957 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1958 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1959 rtx lhs = op0, rhs = op1;
1961 if (GET_CODE (lhs) == NEG)
1963 coeff0l = -1;
1964 coeff0h = -1;
1965 lhs = XEXP (lhs, 0);
1967 else if (GET_CODE (lhs) == MULT
1968 && CONST_INT_P (XEXP (lhs, 1)))
1970 coeff0l = INTVAL (XEXP (lhs, 1));
1971 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1972 lhs = XEXP (lhs, 0);
1974 else if (GET_CODE (lhs) == ASHIFT
1975 && CONST_INT_P (XEXP (lhs, 1))
1976 && INTVAL (XEXP (lhs, 1)) >= 0
1977 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1979 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1980 coeff0h = 0;
1981 lhs = XEXP (lhs, 0);
1984 if (GET_CODE (rhs) == NEG)
1986 negcoeff1l = 1;
1987 negcoeff1h = 0;
1988 rhs = XEXP (rhs, 0);
1990 else if (GET_CODE (rhs) == MULT
1991 && CONST_INT_P (XEXP (rhs, 1)))
1993 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1994 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1995 rhs = XEXP (rhs, 0);
1997 else if (GET_CODE (rhs) == ASHIFT
1998 && CONST_INT_P (XEXP (rhs, 1))
1999 && INTVAL (XEXP (rhs, 1)) >= 0
2000 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2002 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
2003 negcoeff1h = -1;
2004 rhs = XEXP (rhs, 0);
2007 if (rtx_equal_p (lhs, rhs))
2009 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2010 rtx coeff;
2011 unsigned HOST_WIDE_INT l;
2012 HOST_WIDE_INT h;
2013 bool speed = optimize_function_for_speed_p (cfun);
2015 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
2016 coeff = immed_double_const (l, h, mode);
2018 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2019 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2020 ? tem : 0;
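      /* Illustrative example (added commentary): for
	 (minus:SI (mult:SI X (const_int 3)) X), coeff0 is 3 and the
	 negated coefficient of the right-hand side is -1, so the
	 candidate replacement is (mult:SI X (const_int 2)); it is kept
	 only if rtx_cost does not rate it more expensive than the
	 original MINUS.  */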
2024 /* (a - (-b)) -> (a + b). True even for IEEE. */
2025 if (GET_CODE (op1) == NEG)
2026 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2028 /* (-x - c) may be simplified as (-c - x). */
2029 if (GET_CODE (op0) == NEG
2030 && (CONST_INT_P (op1)
2031 || GET_CODE (op1) == CONST_DOUBLE))
2033 tem = simplify_unary_operation (NEG, mode, op1, mode);
2034 if (tem)
2035 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2038 /* Don't let a relocatable value get a negative coeff. */
2039 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2040 return simplify_gen_binary (PLUS, mode,
2041 op0,
2042 neg_const_int (mode, op1));
2044 /* (x - (x & y)) -> (x & ~y) */
2045 if (GET_CODE (op1) == AND)
2047 if (rtx_equal_p (op0, XEXP (op1, 0)))
2049 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2050 GET_MODE (XEXP (op1, 1)));
2051 return simplify_gen_binary (AND, mode, op0, tem);
2053 if (rtx_equal_p (op0, XEXP (op1, 1)))
2055 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2056 GET_MODE (XEXP (op1, 0)));
2057 return simplify_gen_binary (AND, mode, op0, tem);
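      /* Illustrative example (added commentary): (minus:SI X (and:SI Y X))
	 matches the second test above and becomes (and:SI X (not:SI Y));
	 subtracting the common bits is the same as masking them off.  */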
2061 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2062 by reversing the comparison code if valid. */
2063 if (STORE_FLAG_VALUE == 1
2064 && trueop0 == const1_rtx
2065 && COMPARISON_P (op1)
2066 && (reversed = reversed_comparison (op1, mode)))
2067 return reversed;
2069 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2070 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2071 && GET_CODE (op1) == MULT
2072 && GET_CODE (XEXP (op1, 0)) == NEG)
2074 rtx in1, in2;
2076 in1 = XEXP (XEXP (op1, 0), 0);
2077 in2 = XEXP (op1, 1);
2078 return simplify_gen_binary (PLUS, mode,
2079 simplify_gen_binary (MULT, mode,
2080 in1, in2),
2081 op0);
2084 /* Canonicalize (minus (neg A) (mult B C)) to
2085 (minus (mult (neg B) C) A). */
2086 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2087 && GET_CODE (op1) == MULT
2088 && GET_CODE (op0) == NEG)
2090 rtx in1, in2;
2092 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2093 in2 = XEXP (op1, 1);
2094 return simplify_gen_binary (MINUS, mode,
2095 simplify_gen_binary (MULT, mode,
2096 in1, in2),
2097 XEXP (op0, 0));
2100 /* If one of the operands is a PLUS or a MINUS, see if we can
2101 simplify this by the associative law. This will, for example,
2102 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2103 Don't use the associative law for floating point.
2104 The inaccuracy makes it nonassociative,
2105 and subtle programs can break if operations are associated. */
2107 if (INTEGRAL_MODE_P (mode)
2108 && (plus_minus_operand_p (op0)
2109 || plus_minus_operand_p (op1))
2110 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2111 return tem;
2112 break;
2114 case MULT:
2115 if (trueop1 == constm1_rtx)
2116 return simplify_gen_unary (NEG, mode, op0, mode);
2118 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2119 x is NaN, since x * 0 is then also NaN. Nor is it valid
2120 when the mode has signed zeros, since multiplying a negative
2121 number by 0 will give -0, not 0. */
2122 if (!HONOR_NANS (mode)
2123 && !HONOR_SIGNED_ZEROS (mode)
2124 && trueop1 == CONST0_RTX (mode)
2125 && ! side_effects_p (op0))
2126 return op1;
2128 /* In IEEE floating point, x*1 is not equivalent to x for
2129 signalling NaNs. */
2130 if (!HONOR_SNANS (mode)
2131 && trueop1 == CONST1_RTX (mode))
2132 return op0;
2134 /* Convert multiply by constant power of two into shift unless
2135 we are still generating RTL. This test is a kludge. */
2136 if (CONST_INT_P (trueop1)
2137 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2138 /* If the mode is larger than the host word size, and the
2139 uppermost bit is set, then this isn't a power of two due
2140 to implicit sign extension. */
2141 && (width <= HOST_BITS_PER_WIDE_INT
2142 || val != HOST_BITS_PER_WIDE_INT - 1))
2143 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
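      /* Illustrative example (added commentary): with trueop1 ==
	 (const_int 8), exact_log2 returns 3, so (mult:SI X (const_int 8))
	 becomes (ashift:SI X (const_int 3)).  */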
2145 /* Likewise for multipliers wider than a word. */
2146 if (GET_CODE (trueop1) == CONST_DOUBLE
2147 && (GET_MODE (trueop1) == VOIDmode
2148 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2149 && GET_MODE (op0) == mode
2150 && CONST_DOUBLE_LOW (trueop1) == 0
2151 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2152 return simplify_gen_binary (ASHIFT, mode, op0,
2153 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2155 /* x*2 is x+x and x*(-1) is -x */
2156 if (GET_CODE (trueop1) == CONST_DOUBLE
2157 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2158 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2159 && GET_MODE (op0) == mode)
2161 REAL_VALUE_TYPE d;
2162 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2164 if (REAL_VALUES_EQUAL (d, dconst2))
2165 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2167 if (!HONOR_SNANS (mode)
2168 && REAL_VALUES_EQUAL (d, dconstm1))
2169 return simplify_gen_unary (NEG, mode, op0, mode);
2172 /* Optimize -x * -x as x * x. */
2173 if (FLOAT_MODE_P (mode)
2174 && GET_CODE (op0) == NEG
2175 && GET_CODE (op1) == NEG
2176 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2177 && !side_effects_p (XEXP (op0, 0)))
2178 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2180 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2181 if (SCALAR_FLOAT_MODE_P (mode)
2182 && GET_CODE (op0) == ABS
2183 && GET_CODE (op1) == ABS
2184 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2185 && !side_effects_p (XEXP (op0, 0)))
2186 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2188 /* Reassociate multiplication, but for floating point MULTs
2189 only when the user specifies unsafe math optimizations. */
2190 if (! FLOAT_MODE_P (mode)
2191 || flag_unsafe_math_optimizations)
2193 tem = simplify_associative_operation (code, mode, op0, op1);
2194 if (tem)
2195 return tem;
2197 break;
2199 case IOR:
2200 if (trueop1 == const0_rtx)
2201 return op0;
2202 if (CONST_INT_P (trueop1)
2203 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2204 == GET_MODE_MASK (mode)))
2205 return op1;
2206 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2207 return op0;
2208 /* A | (~A) -> -1 */
2209 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2210 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2211 && ! side_effects_p (op0)
2212 && SCALAR_INT_MODE_P (mode))
2213 return constm1_rtx;
2215 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2216 if (CONST_INT_P (op1)
2217 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2218 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2219 return op1;
2221 /* Canonicalize (X & C1) | C2. */
2222 if (GET_CODE (op0) == AND
2223 && CONST_INT_P (trueop1)
2224 && CONST_INT_P (XEXP (op0, 1)))
2226 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2227 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2228 HOST_WIDE_INT c2 = INTVAL (trueop1);
2230 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2231 if ((c1 & c2) == c1
2232 && !side_effects_p (XEXP (op0, 0)))
2233 return trueop1;
2235 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2236 if (((c1|c2) & mask) == mask)
2237 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2239 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2240 if (((c1 & ~c2) & mask) != (c1 & mask))
2242 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2243 gen_int_mode (c1 & ~c2, mode));
2244 return simplify_gen_binary (IOR, mode, tem, op1);
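      /* Illustrative example (added commentary): for
	 (ior:SI (and:SI X (const_int 0xff)) (const_int 0x0f)),
	 C1 & ~C2 is 0xf0, so the result is
	 (ior:SI (and:SI X (const_int 0xf0)) (const_int 0x0f)).  */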
2248 /* Convert (A & B) | A to A. */
2249 if (GET_CODE (op0) == AND
2250 && (rtx_equal_p (XEXP (op0, 0), op1)
2251 || rtx_equal_p (XEXP (op0, 1), op1))
2252 && ! side_effects_p (XEXP (op0, 0))
2253 && ! side_effects_p (XEXP (op0, 1)))
2254 return op1;
2256 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2257 mode size to (rotate A CX). */
2259 if (GET_CODE (op1) == ASHIFT
2260 || GET_CODE (op1) == SUBREG)
2262 opleft = op1;
2263 opright = op0;
2265 else
2267 opright = op1;
2268 opleft = op0;
2271 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2272 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2273 && CONST_INT_P (XEXP (opleft, 1))
2274 && CONST_INT_P (XEXP (opright, 1))
2275 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2276 == GET_MODE_BITSIZE (mode)))
2277 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
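      /* Illustrative example (added commentary): in SImode,
	 (ior (ashift X (const_int 8)) (lshiftrt X (const_int 24)))
	 has shift counts that sum to the 32-bit mode size, so it is
	 rewritten as (rotate X (const_int 8)).  */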
2279 /* Same, but for ashift that has been "simplified" to a wider mode
2280 by simplify_shift_const. */
2282 if (GET_CODE (opleft) == SUBREG
2283 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2284 && GET_CODE (opright) == LSHIFTRT
2285 && GET_CODE (XEXP (opright, 0)) == SUBREG
2286 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2287 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2288 && (GET_MODE_SIZE (GET_MODE (opleft))
2289 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2290 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2291 SUBREG_REG (XEXP (opright, 0)))
2292 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2293 && CONST_INT_P (XEXP (opright, 1))
2294 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2295 == GET_MODE_BITSIZE (mode)))
2296 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2297 XEXP (SUBREG_REG (opleft), 1));
2299 /* If we have (ior (and X C1) C2), simplify this by making
2300 C1 as small as possible if C1 actually changes. */
2301 if (CONST_INT_P (op1)
2302 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2303 || INTVAL (op1) > 0)
2304 && GET_CODE (op0) == AND
2305 && CONST_INT_P (XEXP (op0, 1))
2306 && CONST_INT_P (op1)
2307 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2308 return simplify_gen_binary (IOR, mode,
2309 simplify_gen_binary
2310 (AND, mode, XEXP (op0, 0),
2311 GEN_INT (INTVAL (XEXP (op0, 1))
2312 & ~INTVAL (op1))),
2313 op1);
2315 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2316 a (sign_extend (plus ...)). If so, and OP1 is a CONST_INT and
2317 the PLUS does not affect any of the bits in OP1, then we can do
2318 the IOR as a PLUS and we can associate. This is valid if OP1
2319 can be safely shifted left C bits. */
2320 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2321 && GET_CODE (XEXP (op0, 0)) == PLUS
2322 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2323 && CONST_INT_P (XEXP (op0, 1))
2324 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2326 int count = INTVAL (XEXP (op0, 1));
2327 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2329 if (mask >> count == INTVAL (trueop1)
2330 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2331 return simplify_gen_binary (ASHIFTRT, mode,
2332 plus_constant (XEXP (op0, 0), mask),
2333 XEXP (op0, 1));
2336 tem = simplify_associative_operation (code, mode, op0, op1);
2337 if (tem)
2338 return tem;
2339 break;
2341 case XOR:
2342 if (trueop1 == const0_rtx)
2343 return op0;
2344 if (CONST_INT_P (trueop1)
2345 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2346 == GET_MODE_MASK (mode)))
2347 return simplify_gen_unary (NOT, mode, op0, mode);
2348 if (rtx_equal_p (trueop0, trueop1)
2349 && ! side_effects_p (op0)
2350 && GET_MODE_CLASS (mode) != MODE_CC)
2351 return CONST0_RTX (mode);
2353 /* Canonicalize XOR of the most significant bit to PLUS. */
2354 if ((CONST_INT_P (op1)
2355 || GET_CODE (op1) == CONST_DOUBLE)
2356 && mode_signbit_p (mode, op1))
2357 return simplify_gen_binary (PLUS, mode, op0, op1);
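      /* Illustrative example (added commentary): in QImode the sign bit
	 is 0x80, so (xor:QI X (const_int -128)) is canonicalized as
	 (plus:QI X (const_int -128)); flipping the top bit and adding it
	 are the same operation modulo 256.  */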
2358 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2359 if ((CONST_INT_P (op1)
2360 || GET_CODE (op1) == CONST_DOUBLE)
2361 && GET_CODE (op0) == PLUS
2362 && (CONST_INT_P (XEXP (op0, 1))
2363 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2364 && mode_signbit_p (mode, XEXP (op0, 1)))
2365 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2366 simplify_gen_binary (XOR, mode, op1,
2367 XEXP (op0, 1)));
2369 /* If we are XORing two things that have no bits in common,
2370 convert them into an IOR. This helps to detect rotation encoded
2371 using those methods and possibly other simplifications. */
2373 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2374 && (nonzero_bits (op0, mode)
2375 & nonzero_bits (op1, mode)) == 0)
2376 return (simplify_gen_binary (IOR, mode, op0, op1));
2378 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2379 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2380 (NOT y). */
2382 int num_negated = 0;
2384 if (GET_CODE (op0) == NOT)
2385 num_negated++, op0 = XEXP (op0, 0);
2386 if (GET_CODE (op1) == NOT)
2387 num_negated++, op1 = XEXP (op1, 0);
2389 if (num_negated == 2)
2390 return simplify_gen_binary (XOR, mode, op0, op1);
2391 else if (num_negated == 1)
2392 return simplify_gen_unary (NOT, mode,
2393 simplify_gen_binary (XOR, mode, op0, op1),
2394 mode);
2397 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2398 correspond to a machine insn or result in further simplifications
2399 if B is a constant. */
2401 if (GET_CODE (op0) == AND
2402 && rtx_equal_p (XEXP (op0, 1), op1)
2403 && ! side_effects_p (op1))
2404 return simplify_gen_binary (AND, mode,
2405 simplify_gen_unary (NOT, mode,
2406 XEXP (op0, 0), mode),
2407 op1);
2409 else if (GET_CODE (op0) == AND
2410 && rtx_equal_p (XEXP (op0, 0), op1)
2411 && ! side_effects_p (op1))
2412 return simplify_gen_binary (AND, mode,
2413 simplify_gen_unary (NOT, mode,
2414 XEXP (op0, 1), mode),
2415 op1);
2417 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2418 comparison if STORE_FLAG_VALUE is 1. */
2419 if (STORE_FLAG_VALUE == 1
2420 && trueop1 == const1_rtx
2421 && COMPARISON_P (op0)
2422 && (reversed = reversed_comparison (op0, mode)))
2423 return reversed;
2425 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2426 is (lt foo (const_int 0)), so we can perform the above
2427 simplification if STORE_FLAG_VALUE is 1. */
2429 if (STORE_FLAG_VALUE == 1
2430 && trueop1 == const1_rtx
2431 && GET_CODE (op0) == LSHIFTRT
2432 && CONST_INT_P (XEXP (op0, 1))
2433 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2434 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
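      /* Illustrative example (added commentary): in SImode,
	 (xor (lshiftrt X (const_int 31)) (const_int 1)) inverts the sign
	 bit test, so with STORE_FLAG_VALUE == 1 it becomes
	 (ge:SI X (const_int 0)).  */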
2436 /* (xor (comparison foo bar) (const_int sign-bit))
2437 when STORE_FLAG_VALUE is the sign bit. */
2438 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2439 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2440 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2441 && trueop1 == const_true_rtx
2442 && COMPARISON_P (op0)
2443 && (reversed = reversed_comparison (op0, mode)))
2444 return reversed;
2446 tem = simplify_associative_operation (code, mode, op0, op1);
2447 if (tem)
2448 return tem;
2449 break;
2451 case AND:
2452 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2453 return trueop1;
2454 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2456 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2457 HOST_WIDE_INT nzop1;
2458 if (CONST_INT_P (trueop1))
2460 HOST_WIDE_INT val1 = INTVAL (trueop1);
2461 /* If we are turning off bits already known off in OP0, we need
2462 not do an AND. */
2463 if ((nzop0 & ~val1) == 0)
2464 return op0;
2466 nzop1 = nonzero_bits (trueop1, mode);
2467 /* If we are clearing all the nonzero bits, the result is zero. */
2468 if ((nzop1 & nzop0) == 0
2469 && !side_effects_p (op0) && !side_effects_p (op1))
2470 return CONST0_RTX (mode);
2472 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2473 && GET_MODE_CLASS (mode) != MODE_CC)
2474 return op0;
2475 /* A & (~A) -> 0 */
2476 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2477 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2478 && ! side_effects_p (op0)
2479 && GET_MODE_CLASS (mode) != MODE_CC)
2480 return CONST0_RTX (mode);
2482 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2483 there are no nonzero bits of C outside of X's mode. */
2484 if ((GET_CODE (op0) == SIGN_EXTEND
2485 || GET_CODE (op0) == ZERO_EXTEND)
2486 && CONST_INT_P (trueop1)
2487 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2488 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2489 & INTVAL (trueop1)) == 0)
2491 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2492 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2493 gen_int_mode (INTVAL (trueop1),
2494 imode));
2495 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2498 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2499 we might be able to further simplify the AND with X and potentially
2500 remove the truncation altogether. */
2501 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2503 rtx x = XEXP (op0, 0);
2504 enum machine_mode xmode = GET_MODE (x);
2505 tem = simplify_gen_binary (AND, xmode, x,
2506 gen_int_mode (INTVAL (trueop1), xmode));
2507 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2510 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2511 if (GET_CODE (op0) == IOR
2512 && CONST_INT_P (trueop1)
2513 && CONST_INT_P (XEXP (op0, 1)))
2515 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2516 return simplify_gen_binary (IOR, mode,
2517 simplify_gen_binary (AND, mode,
2518 XEXP (op0, 0), op1),
2519 gen_int_mode (tmp, mode));
2522 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2523 insn (and may simplify more). */
2524 if (GET_CODE (op0) == XOR
2525 && rtx_equal_p (XEXP (op0, 0), op1)
2526 && ! side_effects_p (op1))
2527 return simplify_gen_binary (AND, mode,
2528 simplify_gen_unary (NOT, mode,
2529 XEXP (op0, 1), mode),
2530 op1);
2532 if (GET_CODE (op0) == XOR
2533 && rtx_equal_p (XEXP (op0, 1), op1)
2534 && ! side_effects_p (op1))
2535 return simplify_gen_binary (AND, mode,
2536 simplify_gen_unary (NOT, mode,
2537 XEXP (op0, 0), mode),
2538 op1);
2540 /* Similarly for (~(A ^ B)) & A. */
2541 if (GET_CODE (op0) == NOT
2542 && GET_CODE (XEXP (op0, 0)) == XOR
2543 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2544 && ! side_effects_p (op1))
2545 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2547 if (GET_CODE (op0) == NOT
2548 && GET_CODE (XEXP (op0, 0)) == XOR
2549 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2550 && ! side_effects_p (op1))
2551 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2553 /* Convert (A | B) & A to A. */
2554 if (GET_CODE (op0) == IOR
2555 && (rtx_equal_p (XEXP (op0, 0), op1)
2556 || rtx_equal_p (XEXP (op0, 1), op1))
2557 && ! side_effects_p (XEXP (op0, 0))
2558 && ! side_effects_p (XEXP (op0, 1)))
2559 return op1;
2561 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2562 ((A & N) + B) & M -> (A + B) & M
2563 Similarly if (N & M) == 0,
2564 ((A | N) + B) & M -> (A + B) & M
2565 and for - instead of + and/or ^ instead of |.
2566 Also, if (N & M) == 0, then
2567 (A +- N) & M -> A & M. */
2568 if (CONST_INT_P (trueop1)
2569 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2570 && ~INTVAL (trueop1)
2571 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2572 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2574 rtx pmop[2];
2575 int which;
2577 pmop[0] = XEXP (op0, 0);
2578 pmop[1] = XEXP (op0, 1);
2580 if (CONST_INT_P (pmop[1])
2581 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2582 return simplify_gen_binary (AND, mode, pmop[0], op1);
2584 for (which = 0; which < 2; which++)
2586 tem = pmop[which];
2587 switch (GET_CODE (tem))
2589 case AND:
2590 if (CONST_INT_P (XEXP (tem, 1))
2591 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2592 == INTVAL (trueop1))
2593 pmop[which] = XEXP (tem, 0);
2594 break;
2595 case IOR:
2596 case XOR:
2597 if (CONST_INT_P (XEXP (tem, 1))
2598 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2599 pmop[which] = XEXP (tem, 0);
2600 break;
2601 default:
2602 break;
2606 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2608 tem = simplify_gen_binary (GET_CODE (op0), mode,
2609 pmop[0], pmop[1]);
2610 return simplify_gen_binary (code, mode, tem, op1);
2614 /* (and X (ior (not X) Y)) -> (and X Y) */
2615 if (GET_CODE (op1) == IOR
2616 && GET_CODE (XEXP (op1, 0)) == NOT
2617 && op0 == XEXP (XEXP (op1, 0), 0))
2618 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2620 /* (and (ior (not X) Y) X) -> (and X Y) */
2621 if (GET_CODE (op0) == IOR
2622 && GET_CODE (XEXP (op0, 0)) == NOT
2623 && op1 == XEXP (XEXP (op0, 0), 0))
2624 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2629 break;
2631 case UDIV:
2632 /* 0/x is 0 (or x&0 if x has side-effects). */
2633 if (trueop0 == CONST0_RTX (mode))
2635 if (side_effects_p (op1))
2636 return simplify_gen_binary (AND, mode, op1, trueop0);
2637 return trueop0;
2639 /* x/1 is x. */
2640 if (trueop1 == CONST1_RTX (mode))
2641 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2642 /* Convert divide by power of two into shift. */
2643 if (CONST_INT_P (trueop1)
2644 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2645 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
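      /* Illustrative example (added commentary): (udiv:SI X (const_int 16))
	 becomes (lshiftrt:SI X (const_int 4)); unsigned division by a
	 power of two is a logical right shift.  */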
2646 break;
2648 case DIV:
2649 /* Handle floating point and integers separately. */
2650 if (SCALAR_FLOAT_MODE_P (mode))
2652 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2653 safe for modes with NaNs, since 0.0 / 0.0 will then be
2654 NaN rather than 0.0. Nor is it safe for modes with signed
2655 zeros, since dividing 0 by a negative number gives -0.0 */
2656 if (trueop0 == CONST0_RTX (mode)
2657 && !HONOR_NANS (mode)
2658 && !HONOR_SIGNED_ZEROS (mode)
2659 && ! side_effects_p (op1))
2660 return op0;
2661 /* x/1.0 is x. */
2662 if (trueop1 == CONST1_RTX (mode)
2663 && !HONOR_SNANS (mode))
2664 return op0;
2666 if (GET_CODE (trueop1) == CONST_DOUBLE
2667 && trueop1 != CONST0_RTX (mode))
2669 REAL_VALUE_TYPE d;
2670 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2672 /* x/-1.0 is -x. */
2673 if (REAL_VALUES_EQUAL (d, dconstm1)
2674 && !HONOR_SNANS (mode))
2675 return simplify_gen_unary (NEG, mode, op0, mode);
2677 /* Change FP division by a constant into multiplication.
2678 Only do this with -freciprocal-math. */
2679 if (flag_reciprocal_math
2680 && !REAL_VALUES_EQUAL (d, dconst0))
2682 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2683 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2684 return simplify_gen_binary (MULT, mode, op0, tem);
2688 else
2690 /* 0/x is 0 (or x&0 if x has side-effects). */
2691 if (trueop0 == CONST0_RTX (mode))
2693 if (side_effects_p (op1))
2694 return simplify_gen_binary (AND, mode, op1, trueop0);
2695 return trueop0;
2697 /* x/1 is x. */
2698 if (trueop1 == CONST1_RTX (mode))
2699 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2700 /* x/-1 is -x. */
2701 if (trueop1 == constm1_rtx)
2703 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2704 return simplify_gen_unary (NEG, mode, x, mode);
2707 break;
2709 case UMOD:
2710 /* 0%x is 0 (or x&0 if x has side-effects). */
2711 if (trueop0 == CONST0_RTX (mode))
2713 if (side_effects_p (op1))
2714 return simplify_gen_binary (AND, mode, op1, trueop0);
2715 return trueop0;
2717 /* x%1 is 0 (or x&0 if x has side-effects). */
2718 if (trueop1 == CONST1_RTX (mode))
2720 if (side_effects_p (op0))
2721 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2722 return CONST0_RTX (mode);
2724 /* Implement modulus by power of two as AND. */
2725 if (CONST_INT_P (trueop1)
2726 && exact_log2 (INTVAL (trueop1)) > 0)
2727 return simplify_gen_binary (AND, mode, op0,
2728 GEN_INT (INTVAL (op1) - 1));
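      /* Illustrative example (added commentary): (umod:SI X (const_int 8))
	 becomes (and:SI X (const_int 7)); the remainder of an unsigned
	 division by a power of two is just the low-order bits.  */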
2729 break;
2731 case MOD:
2732 /* 0%x is 0 (or x&0 if x has side-effects). */
2733 if (trueop0 == CONST0_RTX (mode))
2735 if (side_effects_p (op1))
2736 return simplify_gen_binary (AND, mode, op1, trueop0);
2737 return trueop0;
2739 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2740 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2742 if (side_effects_p (op0))
2743 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2744 return CONST0_RTX (mode);
2746 break;
2748 case ROTATERT:
2749 case ROTATE:
2750 case ASHIFTRT:
2751 if (trueop1 == CONST0_RTX (mode))
2752 return op0;
2753 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2754 return op0;
2755 /* Rotating ~0 always results in ~0. */
2756 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2757 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2758 && ! side_effects_p (op1))
2759 return op0;
2760 canonicalize_shift:
2761 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2763 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2764 if (val != INTVAL (op1))
2765 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2767 break;
2769 case ASHIFT:
2770 case SS_ASHIFT:
2771 case US_ASHIFT:
2772 if (trueop1 == CONST0_RTX (mode))
2773 return op0;
2774 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2775 return op0;
2776 goto canonicalize_shift;
2778 case LSHIFTRT:
2779 if (trueop1 == CONST0_RTX (mode))
2780 return op0;
2781 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2782 return op0;
2783 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2784 if (GET_CODE (op0) == CLZ
2785 && CONST_INT_P (trueop1)
2786 && STORE_FLAG_VALUE == 1
2787 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2789 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2790 unsigned HOST_WIDE_INT zero_val = 0;
2792 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2793 && zero_val == GET_MODE_BITSIZE (imode)
2794 && INTVAL (trueop1) == exact_log2 (zero_val))
2795 return simplify_gen_relational (EQ, mode, imode,
2796 XEXP (op0, 0), const0_rtx);
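      /* Illustrative example (added commentary, assuming a target whose
	 CLZ_DEFINED_VALUE_AT_ZERO yields the full 32-bit width): (clz:SI X)
	 equals 32 only when X is zero, so (lshiftrt (clz:SI X) (const_int 5))
	 is nonzero exactly when X == 0, and the expression becomes
	 (eq X (const_int 0)).  */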
2798 goto canonicalize_shift;
2800 case SMIN:
2801 if (width <= HOST_BITS_PER_WIDE_INT
2802 && CONST_INT_P (trueop1)
2803 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2804 && ! side_effects_p (op0))
2805 return op1;
2806 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2807 return op0;
2808 tem = simplify_associative_operation (code, mode, op0, op1);
2809 if (tem)
2810 return tem;
2811 break;
2813 case SMAX:
2814 if (width <= HOST_BITS_PER_WIDE_INT
2815 && CONST_INT_P (trueop1)
2816 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2817 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2818 && ! side_effects_p (op0))
2819 return op1;
2820 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2821 return op0;
2822 tem = simplify_associative_operation (code, mode, op0, op1);
2823 if (tem)
2824 return tem;
2825 break;
2827 case UMIN:
2828 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2829 return op1;
2830 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2831 return op0;
2832 tem = simplify_associative_operation (code, mode, op0, op1);
2833 if (tem)
2834 return tem;
2835 break;
2837 case UMAX:
2838 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2839 return op1;
2840 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2841 return op0;
2842 tem = simplify_associative_operation (code, mode, op0, op1);
2843 if (tem)
2844 return tem;
2845 break;
2847 case SS_PLUS:
2848 case US_PLUS:
2849 case SS_MINUS:
2850 case US_MINUS:
2851 case SS_MULT:
2852 case US_MULT:
2853 case SS_DIV:
2854 case US_DIV:
2855 /* ??? There are simplifications that can be done. */
2856 return 0;
2858 case VEC_SELECT:
2859 if (!VECTOR_MODE_P (mode))
2861 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2862 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2863 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2864 gcc_assert (XVECLEN (trueop1, 0) == 1);
2865 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2867 if (GET_CODE (trueop0) == CONST_VECTOR)
2868 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2869 (trueop1, 0, 0)));
2871 /* Extract a scalar element from a nested VEC_SELECT expression
2872 (with an optional nested VEC_CONCAT expression). Some targets
2873 (i386) extract a scalar element from a vector using a chain of
2874 nested VEC_SELECT expressions. When the input operand is a memory
2875 operand, this operation can be simplified to a simple scalar
2876 load from an offset memory address. */
2877 if (GET_CODE (trueop0) == VEC_SELECT)
2879 rtx op0 = XEXP (trueop0, 0);
2880 rtx op1 = XEXP (trueop0, 1);
2882 enum machine_mode opmode = GET_MODE (op0);
2883 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2884 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2886 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2887 int elem;
2889 rtvec vec;
2890 rtx tmp_op, tmp;
2892 gcc_assert (GET_CODE (op1) == PARALLEL);
2893 gcc_assert (i < n_elts);
2895 /* Select the element pointed to by the nested selector. */
2896 elem = INTVAL (XVECEXP (op1, 0, i));
2898 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2899 if (GET_CODE (op0) == VEC_CONCAT)
2901 rtx op00 = XEXP (op0, 0);
2902 rtx op01 = XEXP (op0, 1);
2904 enum machine_mode mode00, mode01;
2905 int n_elts00, n_elts01;
2907 mode00 = GET_MODE (op00);
2908 mode01 = GET_MODE (op01);
2910 /* Find out number of elements of each operand. */
2911 if (VECTOR_MODE_P (mode00))
2913 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2914 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2916 else
2917 n_elts00 = 1;
2919 if (VECTOR_MODE_P (mode01))
2921 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2922 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2924 else
2925 n_elts01 = 1;
2927 gcc_assert (n_elts == n_elts00 + n_elts01);
2929 /* Select correct operand of VEC_CONCAT
2930 and adjust selector. */
2931 if (elem < n_elts01)
2932 tmp_op = op00;
2933 else
2935 tmp_op = op01;
2936 elem -= n_elts00;
2939 else
2940 tmp_op = op0;
2942 vec = rtvec_alloc (1);
2943 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2945 tmp = gen_rtx_fmt_ee (code, mode,
2946 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2947 return tmp;
2949 if (GET_CODE (trueop0) == VEC_DUPLICATE
2950 && GET_MODE (XEXP (trueop0, 0)) == mode)
2951 return XEXP (trueop0, 0);
2953 else
2955 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2956 gcc_assert (GET_MODE_INNER (mode)
2957 == GET_MODE_INNER (GET_MODE (trueop0)));
2958 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2960 if (GET_CODE (trueop0) == CONST_VECTOR)
2962 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2963 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2964 rtvec v = rtvec_alloc (n_elts);
2965 unsigned int i;
2967 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2968 for (i = 0; i < n_elts; i++)
2970 rtx x = XVECEXP (trueop1, 0, i);
2972 gcc_assert (CONST_INT_P (x));
2973 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2974 INTVAL (x));
2977 return gen_rtx_CONST_VECTOR (mode, v);
2981 if (XVECLEN (trueop1, 0) == 1
2982 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2983 && GET_CODE (trueop0) == VEC_CONCAT)
2985 rtx vec = trueop0;
2986 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2988 /* Try to find the element in the VEC_CONCAT. */
2989 while (GET_MODE (vec) != mode
2990 && GET_CODE (vec) == VEC_CONCAT)
2992 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2993 if (offset < vec_size)
2994 vec = XEXP (vec, 0);
2995 else
2997 offset -= vec_size;
2998 vec = XEXP (vec, 1);
3000 vec = avoid_constant_pool_reference (vec);
3003 if (GET_MODE (vec) == mode)
3004 return vec;
3007 return 0;
3008 case VEC_CONCAT:
3010 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3011 ? GET_MODE (trueop0)
3012 : GET_MODE_INNER (mode));
3013 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3014 ? GET_MODE (trueop1)
3015 : GET_MODE_INNER (mode));
3017 gcc_assert (VECTOR_MODE_P (mode));
3018 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3019 == GET_MODE_SIZE (mode));
3021 if (VECTOR_MODE_P (op0_mode))
3022 gcc_assert (GET_MODE_INNER (mode)
3023 == GET_MODE_INNER (op0_mode));
3024 else
3025 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3027 if (VECTOR_MODE_P (op1_mode))
3028 gcc_assert (GET_MODE_INNER (mode)
3029 == GET_MODE_INNER (op1_mode));
3030 else
3031 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3033 if ((GET_CODE (trueop0) == CONST_VECTOR
3034 || CONST_INT_P (trueop0)
3035 || GET_CODE (trueop0) == CONST_DOUBLE)
3036 && (GET_CODE (trueop1) == CONST_VECTOR
3037 || CONST_INT_P (trueop1)
3038 || GET_CODE (trueop1) == CONST_DOUBLE))
3040 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3041 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3042 rtvec v = rtvec_alloc (n_elts);
3043 unsigned int i;
3044 unsigned in_n_elts = 1;
3046 if (VECTOR_MODE_P (op0_mode))
3047 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3048 for (i = 0; i < n_elts; i++)
3050 if (i < in_n_elts)
3052 if (!VECTOR_MODE_P (op0_mode))
3053 RTVEC_ELT (v, i) = trueop0;
3054 else
3055 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3057 else
3059 if (!VECTOR_MODE_P (op1_mode))
3060 RTVEC_ELT (v, i) = trueop1;
3061 else
3062 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3063 i - in_n_elts);
3067 return gen_rtx_CONST_VECTOR (mode, v);
3070 return 0;
3072 default:
3073 gcc_unreachable ();
3076 return 0;
3080 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3081 rtx op0, rtx op1)
3083 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3084 HOST_WIDE_INT val;
3085 unsigned int width = GET_MODE_BITSIZE (mode);
3087 if (VECTOR_MODE_P (mode)
3088 && code != VEC_CONCAT
3089 && GET_CODE (op0) == CONST_VECTOR
3090 && GET_CODE (op1) == CONST_VECTOR)
3092 unsigned n_elts = GET_MODE_NUNITS (mode);
3093 enum machine_mode op0mode = GET_MODE (op0);
3094 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3095 enum machine_mode op1mode = GET_MODE (op1);
3096 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3097 rtvec v = rtvec_alloc (n_elts);
3098 unsigned int i;
3100 gcc_assert (op0_n_elts == n_elts);
3101 gcc_assert (op1_n_elts == n_elts);
3102 for (i = 0; i < n_elts; i++)
3104 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3105 CONST_VECTOR_ELT (op0, i),
3106 CONST_VECTOR_ELT (op1, i));
3107 if (!x)
3108 return 0;
3109 RTVEC_ELT (v, i) = x;
3112 return gen_rtx_CONST_VECTOR (mode, v);
3115 if (VECTOR_MODE_P (mode)
3116 && code == VEC_CONCAT
3117 && (CONST_INT_P (op0)
3118 || GET_CODE (op0) == CONST_DOUBLE
3119 || GET_CODE (op0) == CONST_FIXED)
3120 && (CONST_INT_P (op1)
3121 || GET_CODE (op1) == CONST_DOUBLE
3122 || GET_CODE (op1) == CONST_FIXED))
3124 unsigned n_elts = GET_MODE_NUNITS (mode);
3125 rtvec v = rtvec_alloc (n_elts);
3127 gcc_assert (n_elts >= 2);
3128 if (n_elts == 2)
3130 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3131 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3133 RTVEC_ELT (v, 0) = op0;
3134 RTVEC_ELT (v, 1) = op1;
3136 else
3138 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3139 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3140 unsigned i;
3142 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3143 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3144 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3146 for (i = 0; i < op0_n_elts; ++i)
3147 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3148 for (i = 0; i < op1_n_elts; ++i)
3149 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3152 return gen_rtx_CONST_VECTOR (mode, v);
3155 if (SCALAR_FLOAT_MODE_P (mode)
3156 && GET_CODE (op0) == CONST_DOUBLE
3157 && GET_CODE (op1) == CONST_DOUBLE
3158 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3160 if (code == AND
3161 || code == IOR
3162 || code == XOR)
3164 long tmp0[4];
3165 long tmp1[4];
3166 REAL_VALUE_TYPE r;
3167 int i;
3169 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3170 GET_MODE (op0));
3171 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3172 GET_MODE (op1));
3173 for (i = 0; i < 4; i++)
3175 switch (code)
3177 case AND:
3178 tmp0[i] &= tmp1[i];
3179 break;
3180 case IOR:
3181 tmp0[i] |= tmp1[i];
3182 break;
3183 case XOR:
3184 tmp0[i] ^= tmp1[i];
3185 break;
3186 default:
3187 gcc_unreachable ();
3190 real_from_target (&r, tmp0, mode);
3191 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3193 else
3195 REAL_VALUE_TYPE f0, f1, value, result;
3196 bool inexact;
3198 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3199 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3200 real_convert (&f0, mode, &f0);
3201 real_convert (&f1, mode, &f1);
3203 if (HONOR_SNANS (mode)
3204 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3205 return 0;
3207 if (code == DIV
3208 && REAL_VALUES_EQUAL (f1, dconst0)
3209 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3210 return 0;
3212 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3213 && flag_trapping_math
3214 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3216 int s0 = REAL_VALUE_NEGATIVE (f0);
3217 int s1 = REAL_VALUE_NEGATIVE (f1);
3219 switch (code)
3221 case PLUS:
3222 /* Inf + -Inf = NaN plus exception. */
3223 if (s0 != s1)
3224 return 0;
3225 break;
3226 case MINUS:
3227 /* Inf - Inf = NaN plus exception. */
3228 if (s0 == s1)
3229 return 0;
3230 break;
3231 case DIV:
3232 /* Inf / Inf = NaN plus exception. */
3233 return 0;
3234 default:
3235 break;
3239 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3240 && flag_trapping_math
3241 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3242 || (REAL_VALUE_ISINF (f1)
3243 && REAL_VALUES_EQUAL (f0, dconst0))))
3244 /* Inf * 0 = NaN plus exception. */
3245 return 0;
3247 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3248 &f0, &f1);
3249 real_convert (&result, mode, &value);
3251 /* Don't constant fold this floating point operation if
3252 the result has overflowed and flag_trapping_math. */
3254 if (flag_trapping_math
3255 && MODE_HAS_INFINITIES (mode)
3256 && REAL_VALUE_ISINF (result)
3257 && !REAL_VALUE_ISINF (f0)
3258 && !REAL_VALUE_ISINF (f1))
3259 /* Overflow plus exception. */
3260 return 0;
3262 /* Don't constant fold this floating point operation if the
3263 result may depend upon the run-time rounding mode and
3264 flag_rounding_math is set, or if GCC's software emulation
3265 is unable to accurately represent the result. */
3267 if ((flag_rounding_math
3268 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3269 && (inexact || !real_identical (&result, &value)))
3270 return NULL_RTX;
3272 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3276 /* We can fold some multi-word operations. */
3277 if (GET_MODE_CLASS (mode) == MODE_INT
3278 && width == HOST_BITS_PER_WIDE_INT * 2
3279 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3280 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3282 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3283 HOST_WIDE_INT h1, h2, hv, ht;
3285 if (GET_CODE (op0) == CONST_DOUBLE)
3286 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3287 else
3288 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3290 if (GET_CODE (op1) == CONST_DOUBLE)
3291 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3292 else
3293 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3295 switch (code)
3297 case MINUS:
3298 /* A - B == A + (-B). */
3299 neg_double (l2, h2, &lv, &hv);
3300 l2 = lv, h2 = hv;
3302 /* Fall through.... */
3304 case PLUS:
3305 add_double (l1, h1, l2, h2, &lv, &hv);
3306 break;
3308 case MULT:
3309 mul_double (l1, h1, l2, h2, &lv, &hv);
3310 break;
3312 case DIV:
3313 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3314 &lv, &hv, &lt, &ht))
3315 return 0;
3316 break;
3318 case MOD:
3319 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3320 &lt, &ht, &lv, &hv))
3321 return 0;
3322 break;
3324 case UDIV:
3325 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3326 &lv, &hv, &lt, &ht))
3327 return 0;
3328 break;
3330 case UMOD:
3331 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3332 &lt, &ht, &lv, &hv))
3333 return 0;
3334 break;
3336 case AND:
3337 lv = l1 & l2, hv = h1 & h2;
3338 break;
3340 case IOR:
3341 lv = l1 | l2, hv = h1 | h2;
3342 break;
3344 case XOR:
3345 lv = l1 ^ l2, hv = h1 ^ h2;
3346 break;
3348 case SMIN:
3349 if (h1 < h2
3350 || (h1 == h2
3351 && ((unsigned HOST_WIDE_INT) l1
3352 < (unsigned HOST_WIDE_INT) l2)))
3353 lv = l1, hv = h1;
3354 else
3355 lv = l2, hv = h2;
3356 break;
3358 case SMAX:
3359 if (h1 > h2
3360 || (h1 == h2
3361 && ((unsigned HOST_WIDE_INT) l1
3362 > (unsigned HOST_WIDE_INT) l2)))
3363 lv = l1, hv = h1;
3364 else
3365 lv = l2, hv = h2;
3366 break;
3368 case UMIN:
3369 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3370 || (h1 == h2
3371 && ((unsigned HOST_WIDE_INT) l1
3372 < (unsigned HOST_WIDE_INT) l2)))
3373 lv = l1, hv = h1;
3374 else
3375 lv = l2, hv = h2;
3376 break;
3378 case UMAX:
3379 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3380 || (h1 == h2
3381 && ((unsigned HOST_WIDE_INT) l1
3382 > (unsigned HOST_WIDE_INT) l2)))
3383 lv = l1, hv = h1;
3384 else
3385 lv = l2, hv = h2;
3386 break;
3388 case LSHIFTRT: case ASHIFTRT:
3389 case ASHIFT:
3390 case ROTATE: case ROTATERT:
3391 if (SHIFT_COUNT_TRUNCATED)
3392 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3394 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3395 return 0;
3397 if (code == LSHIFTRT || code == ASHIFTRT)
3398 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3399 code == ASHIFTRT);
3400 else if (code == ASHIFT)
3401 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3402 else if (code == ROTATE)
3403 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3404 else /* code == ROTATERT */
3405 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3406 break;
3408 default:
3409 return 0;
3412 return immed_double_const (lv, hv, mode);
3415 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3416 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3418 /* Get the integer argument values in two forms:
3419 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3421 arg0 = INTVAL (op0);
3422 arg1 = INTVAL (op1);
3424 if (width < HOST_BITS_PER_WIDE_INT)
3426 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3427 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3429 arg0s = arg0;
3430 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3431 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3433 arg1s = arg1;
3434 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3435 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3437 else
3439 arg0s = arg0;
3440 arg1s = arg1;
3443 /* Compute the value of the arithmetic. */
3445 switch (code)
3447 case PLUS:
3448 val = arg0s + arg1s;
3449 break;
3451 case MINUS:
3452 val = arg0s - arg1s;
3453 break;
3455 case MULT:
3456 val = arg0s * arg1s;
3457 break;
3459 case DIV:
3460 if (arg1s == 0
3461 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3462 && arg1s == -1))
3463 return 0;
3464 val = arg0s / arg1s;
3465 break;
3467 case MOD:
3468 if (arg1s == 0
3469 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3470 && arg1s == -1))
3471 return 0;
3472 val = arg0s % arg1s;
3473 break;
3475 case UDIV:
3476 if (arg1 == 0
3477 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3478 && arg1s == -1))
3479 return 0;
3480 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3481 break;
3483 case UMOD:
3484 if (arg1 == 0
3485 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3486 && arg1s == -1))
3487 return 0;
3488 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3489 break;
3491 case AND:
3492 val = arg0 & arg1;
3493 break;
3495 case IOR:
3496 val = arg0 | arg1;
3497 break;
3499 case XOR:
3500 val = arg0 ^ arg1;
3501 break;
3503 case LSHIFTRT:
3504 case ASHIFT:
3505 case ASHIFTRT:
3506 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3507 the value is in range. We can't return any old value for
3508 out-of-range arguments because either the middle-end (via
3509 shift_truncation_mask) or the back-end might be relying on
3510 target-specific knowledge. Nor can we rely on
3511 shift_truncation_mask, since the shift might not be part of an
3512 ashlM3, lshrM3 or ashrM3 instruction. */
3513 if (SHIFT_COUNT_TRUNCATED)
3514 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3515 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3516 return 0;
3518 val = (code == ASHIFT
3519 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3520 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3522 /* Sign-extend the result for arithmetic right shifts. */
3523 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3524 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3525 break;
3527 case ROTATERT:
3528 if (arg1 < 0)
3529 return 0;
3531 arg1 %= width;
3532 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3533 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3534 break;
3536 case ROTATE:
3537 if (arg1 < 0)
3538 return 0;
3540 arg1 %= width;
3541 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3542 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3543 break;
3545 case COMPARE:
3546 /* Do nothing here. */
3547 return 0;
3549 case SMIN:
3550 val = arg0s <= arg1s ? arg0s : arg1s;
3551 break;
3553 case UMIN:
3554 val = ((unsigned HOST_WIDE_INT) arg0
3555 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3556 break;
3558 case SMAX:
3559 val = arg0s > arg1s ? arg0s : arg1s;
3560 break;
3562 case UMAX:
3563 val = ((unsigned HOST_WIDE_INT) arg0
3564 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3565 break;
3567 case SS_PLUS:
3568 case US_PLUS:
3569 case SS_MINUS:
3570 case US_MINUS:
3571 case SS_MULT:
3572 case US_MULT:
3573 case SS_DIV:
3574 case US_DIV:
3575 case SS_ASHIFT:
3576 case US_ASHIFT:
3577 /* ??? There are simplifications that can be done. */
3578 return 0;
3580 default:
3581 gcc_unreachable ();
3584 return gen_int_mode (val, mode);
3587 return NULL_RTX;
3592 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3593 PLUS or MINUS.
3595 Rather than test for specific cases, we do this by a brute-force method
3596 and do all possible simplifications until no more changes occur. Then
3597 we rebuild the operation. */
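/* Illustrative example (added commentary, not part of the original source):
   (minus (plus A B) (plus B C)) is flattened into the signed operand list
   +A, +B, -B, -C; the matching B entries cancel in the pairwise
   simplification loop below, and the result is rebuilt as (minus A C).  */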
3599 struct simplify_plus_minus_op_data
3601 rtx op;
3602 short neg;
3605 static bool
3606 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3608 int result;
3610 result = (commutative_operand_precedence (y)
3611 - commutative_operand_precedence (x));
3612 if (result)
3613 return result > 0;
3615 /* Group together equal REGs to do more simplification. */
3616 if (REG_P (x) && REG_P (y))
3617 return REGNO (x) > REGNO (y);
3618 else
3619 return false;
3622 static rtx
3623 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3624 rtx op1)
3626 struct simplify_plus_minus_op_data ops[8];
3627 rtx result, tem;
3628 int n_ops = 2, input_ops = 2;
3629 int changed, n_constants = 0, canonicalized = 0;
3630 int i, j;
3632 memset (ops, 0, sizeof ops);
3634 /* Set up the two operands and then expand them until nothing has been
3635 changed. If we run out of room in our array, give up; this should
3636 almost never happen. */
3638 ops[0].op = op0;
3639 ops[0].neg = 0;
3640 ops[1].op = op1;
3641 ops[1].neg = (code == MINUS);
3645 changed = 0;
3647 for (i = 0; i < n_ops; i++)
3649 rtx this_op = ops[i].op;
3650 int this_neg = ops[i].neg;
3651 enum rtx_code this_code = GET_CODE (this_op);
3653 switch (this_code)
3655 case PLUS:
3656 case MINUS:
3657 if (n_ops == 7)
3658 return NULL_RTX;
3660 ops[n_ops].op = XEXP (this_op, 1);
3661 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3662 n_ops++;
3664 ops[i].op = XEXP (this_op, 0);
3665 input_ops++;
3666 changed = 1;
3667 canonicalized |= this_neg;
3668 break;
3670 case NEG:
3671 ops[i].op = XEXP (this_op, 0);
3672 ops[i].neg = ! this_neg;
3673 changed = 1;
3674 canonicalized = 1;
3675 break;
3677 case CONST:
3678 if (n_ops < 7
3679 && GET_CODE (XEXP (this_op, 0)) == PLUS
3680 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3681 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3683 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3684 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3685 ops[n_ops].neg = this_neg;
3686 n_ops++;
3687 changed = 1;
3688 canonicalized = 1;
3690 break;
3692 case NOT:
3693 /* ~a -> (-a - 1) */
3694 if (n_ops != 7)
3696 ops[n_ops].op = constm1_rtx;
3697 ops[n_ops++].neg = this_neg;
3698 ops[i].op = XEXP (this_op, 0);
3699 ops[i].neg = !this_neg;
3700 changed = 1;
3701 canonicalized = 1;
3703 break;
3705 case CONST_INT:
3706 n_constants++;
3707 if (this_neg)
3709 ops[i].op = neg_const_int (mode, this_op);
3710 ops[i].neg = 0;
3711 changed = 1;
3712 canonicalized = 1;
3714 break;
3716 default:
3717 break;
3721 while (changed);
3723 if (n_constants > 1)
3724 canonicalized = 1;
3726 gcc_assert (n_ops >= 2);
3728 /* If we only have two operands, we can avoid the loops. */
3729 if (n_ops == 2)
3731 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3732 rtx lhs, rhs;
3734 /* Get the two operands. Be careful with the order, especially for
3735 the cases where code == MINUS. */
3736 if (ops[0].neg && ops[1].neg)
3738 lhs = gen_rtx_NEG (mode, ops[0].op);
3739 rhs = ops[1].op;
3741 else if (ops[0].neg)
3743 lhs = ops[1].op;
3744 rhs = ops[0].op;
3746 else
3748 lhs = ops[0].op;
3749 rhs = ops[1].op;
3752 return simplify_const_binary_operation (code, mode, lhs, rhs);
3755 /* Now simplify each pair of operands until nothing changes. */
3758 /* Insertion sort is good enough for an eight-element array. */
3759 for (i = 1; i < n_ops; i++)
3761 struct simplify_plus_minus_op_data save;
3762 j = i - 1;
3763 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3764 continue;
3766 canonicalized = 1;
3767 save = ops[i];
3769 ops[j + 1] = ops[j];
3770 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3771 ops[j + 1] = save;
3774 changed = 0;
3775 for (i = n_ops - 1; i > 0; i--)
3776 for (j = i - 1; j >= 0; j--)
3778 rtx lhs = ops[j].op, rhs = ops[i].op;
3779 int lneg = ops[j].neg, rneg = ops[i].neg;
3781 if (lhs != 0 && rhs != 0)
3783 enum rtx_code ncode = PLUS;
3785 if (lneg != rneg)
3787 ncode = MINUS;
3788 if (lneg)
3789 tem = lhs, lhs = rhs, rhs = tem;
3791 else if (swap_commutative_operands_p (lhs, rhs))
3792 tem = lhs, lhs = rhs, rhs = tem;
3794 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3795 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3797 rtx tem_lhs, tem_rhs;
3799 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3800 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3801 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3803 if (tem && !CONSTANT_P (tem))
3804 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3806 else
3807 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3809 /* Reject "simplifications" that just wrap the two
3810 arguments in a CONST. Failure to do so can result
3811 in infinite recursion with simplify_binary_operation
3812 when it calls us to simplify CONST operations. */
3813 if (tem
3814 && ! (GET_CODE (tem) == CONST
3815 && GET_CODE (XEXP (tem, 0)) == ncode
3816 && XEXP (XEXP (tem, 0), 0) == lhs
3817 && XEXP (XEXP (tem, 0), 1) == rhs))
3819 lneg &= rneg;
3820 if (GET_CODE (tem) == NEG)
3821 tem = XEXP (tem, 0), lneg = !lneg;
3822 if (CONST_INT_P (tem) && lneg)
3823 tem = neg_const_int (mode, tem), lneg = 0;
3825 ops[i].op = tem;
3826 ops[i].neg = lneg;
3827 ops[j].op = NULL_RTX;
3828 changed = 1;
3829 canonicalized = 1;
3834 /* If nothing changed, fail. */
3835 if (!canonicalized)
3836 return NULL_RTX;
3838 /* Pack all the operands to the lower-numbered entries. */
3839 for (i = 0, j = 0; j < n_ops; j++)
3840 if (ops[j].op)
3842 ops[i] = ops[j];
3843 i++;
3845 n_ops = i;
3847 while (changed);
3849 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3850 if (n_ops == 2
3851 && CONST_INT_P (ops[1].op)
3852 && CONSTANT_P (ops[0].op)
3853 && ops[0].neg)
3854 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3856 /* We suppressed creation of trivial CONST expressions in the
3857 combination loop to avoid recursion. Create one manually now.
3858 The combination loop should have ensured that there is exactly
3859 one CONST_INT, and the sort will have ensured that it is last
3860 in the array and that any other constant will be next-to-last. */
3862 if (n_ops > 1
3863 && CONST_INT_P (ops[n_ops - 1].op)
3864 && CONSTANT_P (ops[n_ops - 2].op))
3866 rtx value = ops[n_ops - 1].op;
3867 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3868 value = neg_const_int (mode, value);
3869 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3870 n_ops--;
3873 /* Put a non-negated operand first, if possible. */
3875 for (i = 0; i < n_ops && ops[i].neg; i++)
3876 continue;
3877 if (i == n_ops)
3878 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3879 else if (i != 0)
3881 tem = ops[0].op;
3882 ops[0] = ops[i];
3883 ops[i].op = tem;
3884 ops[i].neg = 1;
3887 /* Now make the result by performing the requested operations. */
3888 result = ops[0].op;
3889 for (i = 1; i < n_ops; i++)
3890 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3891 mode, result, ops[i].op);
3893 return result;
3896 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3897 static bool
3898 plus_minus_operand_p (const_rtx x)
3900 return GET_CODE (x) == PLUS
3901 || GET_CODE (x) == MINUS
3902 || (GET_CODE (x) == CONST
3903 && GET_CODE (XEXP (x, 0)) == PLUS
3904 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3905 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3908 /* Like simplify_binary_operation except used for relational operators.
3909 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3910 not also be VOIDmode.
3912 CMP_MODE specifies the mode in which the comparison is done, so it is
3913 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3914 the operands or, if both are VOIDmode, the operands are compared in
3915 "infinite precision". */
3917 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3918 enum machine_mode cmp_mode, rtx op0, rtx op1)
3920 rtx tem, trueop0, trueop1;
3922 if (cmp_mode == VOIDmode)
3923 cmp_mode = GET_MODE (op0);
3924 if (cmp_mode == VOIDmode)
3925 cmp_mode = GET_MODE (op1);
3927 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3928 if (tem)
3930 if (SCALAR_FLOAT_MODE_P (mode))
3932 if (tem == const0_rtx)
3933 return CONST0_RTX (mode);
3934 #ifdef FLOAT_STORE_FLAG_VALUE
3936 REAL_VALUE_TYPE val;
3937 val = FLOAT_STORE_FLAG_VALUE (mode);
3938 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3940 #else
3941 return NULL_RTX;
3942 #endif
3944 if (VECTOR_MODE_P (mode))
3946 if (tem == const0_rtx)
3947 return CONST0_RTX (mode);
3948 #ifdef VECTOR_STORE_FLAG_VALUE
3950 int i, units;
3951 rtvec v;
3953 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3954 if (val == NULL_RTX)
3955 return NULL_RTX;
3956 if (val == const1_rtx)
3957 return CONST1_RTX (mode);
3959 units = GET_MODE_NUNITS (mode);
3960 v = rtvec_alloc (units);
3961 for (i = 0; i < units; i++)
3962 RTVEC_ELT (v, i) = val;
3963 return gen_rtx_raw_CONST_VECTOR (mode, v);
3965 #else
3966 return NULL_RTX;
3967 #endif
3970 return tem;
3973 /* For the following tests, ensure const0_rtx is op1. */
3974 if (swap_commutative_operands_p (op0, op1)
3975 || (op0 == const0_rtx && op1 != const0_rtx))
3976 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3978 /* If op0 is a compare, extract the comparison arguments from it. */
3979 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3980 return simplify_gen_relational (code, mode, VOIDmode,
3981 XEXP (op0, 0), XEXP (op0, 1));
3983 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3984 || CC0_P (op0))
3985 return NULL_RTX;
3987 trueop0 = avoid_constant_pool_reference (op0);
3988 trueop1 = avoid_constant_pool_reference (op1);
3989 return simplify_relational_operation_1 (code, mode, cmp_mode,
3990 trueop0, trueop1);
3993 /* This part of simplify_relational_operation is only used when CMP_MODE
3994 is not in class MODE_CC (i.e. it is a real comparison).
3996 MODE is the mode of the result, while CMP_MODE specifies the mode
3997 in which the comparison is done, so it is the mode of the operands. */
3999 static rtx
4000 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4001 enum machine_mode cmp_mode, rtx op0, rtx op1)
4003 enum rtx_code op0code = GET_CODE (op0);
4005 if (op1 == const0_rtx && COMPARISON_P (op0))
4007 /* If op0 is a comparison, extract the comparison arguments
4008 from it. */
4009 if (code == NE)
4011 if (GET_MODE (op0) == mode)
4012 return simplify_rtx (op0);
4013 else
4014 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4015 XEXP (op0, 0), XEXP (op0, 1));
4017 else if (code == EQ)
4019 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4020 if (new_code != UNKNOWN)
4021 return simplify_gen_relational (new_code, mode, VOIDmode,
4022 XEXP (op0, 0), XEXP (op0, 1));
4026 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4027 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4028 if ((code == LTU || code == GEU)
4029 && GET_CODE (op0) == PLUS
4030 && CONST_INT_P (XEXP (op0, 1))
4031 && (rtx_equal_p (op1, XEXP (op0, 0))
4032 || rtx_equal_p (op1, XEXP (op0, 1))))
4034 rtx new_cmp
4035 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4036 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4037 cmp_mode, XEXP (op0, 0), new_cmp);
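/* For instance, in SImode (ltu (plus a (const_int 4)) (const_int 4)) becomes
   (geu a (const_int -4)): the unsigned sum wraps around exactly when A is
   within 4 of the top of the mode.  */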
4040 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4041 if ((code == LTU || code == GEU)
4042 && GET_CODE (op0) == PLUS
4043 && rtx_equal_p (op1, XEXP (op0, 1))
4044 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4045 && !rtx_equal_p (op1, XEXP (op0, 0)))
4046 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
4048 if (op1 == const0_rtx)
4050 /* Canonicalize (GTU x 0) as (NE x 0). */
4051 if (code == GTU)
4052 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4053 /* Canonicalize (LEU x 0) as (EQ x 0). */
4054 if (code == LEU)
4055 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4057 else if (op1 == const1_rtx)
4059 switch (code)
4061 case GE:
4062 /* Canonicalize (GE x 1) as (GT x 0). */
4063 return simplify_gen_relational (GT, mode, cmp_mode,
4064 op0, const0_rtx);
4065 case GEU:
4066 /* Canonicalize (GEU x 1) as (NE x 0). */
4067 return simplify_gen_relational (NE, mode, cmp_mode,
4068 op0, const0_rtx);
4069 case LT:
4070 /* Canonicalize (LT x 1) as (LE x 0). */
4071 return simplify_gen_relational (LE, mode, cmp_mode,
4072 op0, const0_rtx);
4073 case LTU:
4074 /* Canonicalize (LTU x 1) as (EQ x 0). */
4075 return simplify_gen_relational (EQ, mode, cmp_mode,
4076 op0, const0_rtx);
4077 default:
4078 break;
4081 else if (op1 == constm1_rtx)
4083 /* Canonicalize (LE x -1) as (LT x 0). */
4084 if (code == LE)
4085 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4086 /* Canonicalize (GT x -1) as (GE x 0). */
4087 if (code == GT)
4088 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4091 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4092 if ((code == EQ || code == NE)
4093 && (op0code == PLUS || op0code == MINUS)
4094 && CONSTANT_P (op1)
4095 && CONSTANT_P (XEXP (op0, 1))
4096 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4098 rtx x = XEXP (op0, 0);
4099 rtx c = XEXP (op0, 1);
4101 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4102 cmp_mode, op1, c);
4103 return simplify_gen_relational (code, mode, cmp_mode, x, c);
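/* For example, (eq (plus x (const_int 5)) (const_int 7)) folds to
   (eq x (const_int 2)), and (ne (minus x (const_int 5)) (const_int 7)) folds
   to (ne x (const_int 12)).  */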
4106 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4107 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4108 if (code == NE
4109 && op1 == const0_rtx
4110 && GET_MODE_CLASS (mode) == MODE_INT
4111 && cmp_mode != VOIDmode
4112 /* ??? Work-around BImode bugs in the ia64 backend. */
4113 && mode != BImode
4114 && cmp_mode != BImode
4115 && nonzero_bits (op0, cmp_mode) == 1
4116 && STORE_FLAG_VALUE == 1)
4117 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4118 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4119 : lowpart_subreg (mode, op0, cmp_mode);
4121 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4122 if ((code == EQ || code == NE)
4123 && op1 == const0_rtx
4124 && op0code == XOR)
4125 return simplify_gen_relational (code, mode, cmp_mode,
4126 XEXP (op0, 0), XEXP (op0, 1));
4128 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4129 if ((code == EQ || code == NE)
4130 && op0code == XOR
4131 && rtx_equal_p (XEXP (op0, 0), op1)
4132 && !side_effects_p (XEXP (op0, 0)))
4133 return simplify_gen_relational (code, mode, cmp_mode,
4134 XEXP (op0, 1), const0_rtx);
4136 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4137 if ((code == EQ || code == NE)
4138 && op0code == XOR
4139 && rtx_equal_p (XEXP (op0, 1), op1)
4140 && !side_effects_p (XEXP (op0, 1)))
4141 return simplify_gen_relational (code, mode, cmp_mode,
4142 XEXP (op0, 0), const0_rtx);
4144 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4145 if ((code == EQ || code == NE)
4146 && op0code == XOR
4147 && (CONST_INT_P (op1)
4148 || GET_CODE (op1) == CONST_DOUBLE)
4149 && (CONST_INT_P (XEXP (op0, 1))
4150 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4151 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4152 simplify_gen_binary (XOR, cmp_mode,
4153 XEXP (op0, 1), op1));
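/* E.g. (eq (xor x (const_int 0xf0)) (const_int 0xff)) becomes
   (eq x (const_int 0x0f)), since x ^ C1 == C2 holds exactly when
   x == C1 ^ C2.  */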
4155 if (op0code == POPCOUNT && op1 == const0_rtx)
4156 switch (code)
4158 case EQ:
4159 case LE:
4160 case LEU:
4161 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4162 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4163 XEXP (op0, 0), const0_rtx);
4165 case NE:
4166 case GT:
4167 case GTU:
4168 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4169 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4170 XEXP (op0, 0), const0_rtx);
4172 default:
4173 break;
4176 return NULL_RTX;
4179 enum
4181 CMP_EQ = 1,
4182 CMP_LT = 2,
4183 CMP_GT = 4,
4184 CMP_LTU = 8,
4185 CMP_GTU = 16
4189 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4190 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4191 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4192 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4193 For floating-point comparisons, assume that the operands were ordered. */
4195 static rtx
4196 comparison_result (enum rtx_code code, int known_results)
4198 switch (code)
4200 case EQ:
4201 case UNEQ:
4202 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4203 case NE:
4204 case LTGT:
4205 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4207 case LT:
4208 case UNLT:
4209 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4210 case GE:
4211 case UNGE:
4212 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4214 case GT:
4215 case UNGT:
4216 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4217 case LE:
4218 case UNLE:
4219 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4221 case LTU:
4222 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4223 case GEU:
4224 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4226 case GTU:
4227 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4228 case LEU:
4229 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4231 case ORDERED:
4232 return const_true_rtx;
4233 case UNORDERED:
4234 return const0_rtx;
4235 default:
4236 gcc_unreachable ();
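/* So, for instance, comparison_result (GE, CMP_LT) is const0_rtx,
   comparison_result (GE, CMP_GT | CMP_GTU) is const_true_rtx, and a
   KNOWN_RESULTS of just CMP_EQ makes every code answer as it would for
   equal operands.  */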
4240 /* Check if the given comparison (done in the given MODE) is actually a
4241 tautology or a contradiction.
4242 If no simplification is possible, this function returns zero.
4243 Otherwise, it returns either const_true_rtx or const0_rtx. */
4246 simplify_const_relational_operation (enum rtx_code code,
4247 enum machine_mode mode,
4248 rtx op0, rtx op1)
4250 rtx tem;
4251 rtx trueop0;
4252 rtx trueop1;
4254 gcc_assert (mode != VOIDmode
4255 || (GET_MODE (op0) == VOIDmode
4256 && GET_MODE (op1) == VOIDmode));
4258 /* If op0 is a compare, extract the comparison arguments from it. */
4259 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4261 op1 = XEXP (op0, 1);
4262 op0 = XEXP (op0, 0);
4264 if (GET_MODE (op0) != VOIDmode)
4265 mode = GET_MODE (op0);
4266 else if (GET_MODE (op1) != VOIDmode)
4267 mode = GET_MODE (op1);
4268 else
4269 return 0;
4272 /* We can't simplify MODE_CC values since we don't know what the
4273 actual comparison is. */
4274 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4275 return 0;
4277 /* Make sure the constant is second. */
4278 if (swap_commutative_operands_p (op0, op1))
4280 tem = op0, op0 = op1, op1 = tem;
4281 code = swap_condition (code);
4284 trueop0 = avoid_constant_pool_reference (op0);
4285 trueop1 = avoid_constant_pool_reference (op1);
4287 /* For integer comparisons of A and B maybe we can simplify A - B and can
4288 then simplify a comparison of that with zero. If A and B are both either
4289 a register or a CONST_INT, this can't help; testing for these cases will
4290 prevent infinite recursion here and speed things up.
4292 We can only do this for EQ and NE comparisons, as otherwise we may
4293 lose or introduce overflow that we cannot disregard as undefined,
4294 because we do not know the signedness of the operation on either the
4295 left or the right hand side of the comparison. */
4297 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4298 && (code == EQ || code == NE)
4299 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4300 && (REG_P (op1) || CONST_INT_P (trueop1)))
4301 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4302 /* We cannot do this if tem is a nonzero address. */
4303 && ! nonzero_address_p (tem))
4304 return simplify_const_relational_operation (signed_condition (code),
4305 mode, tem, const0_rtx);
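/* For example, testing (plus x (const_int 1)) against (plus x (const_int 3))
   for EQ in an integral mode forms their difference, which folds to
   (const_int -2); the recursive call then compares that constant with zero
   and the whole test collapses to const0_rtx.  */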
4307 if (! HONOR_NANS (mode) && code == ORDERED)
4308 return const_true_rtx;
4310 if (! HONOR_NANS (mode) && code == UNORDERED)
4311 return const0_rtx;
4313 /* For modes without NaNs, if the two operands are equal, we know the
4314 result except if they have side-effects. Even with NaNs we know
4315 the result of unordered comparisons and, if signaling NaNs are
4316 irrelevant, also the result of LT/GT/LTGT. */
4317 if ((! HONOR_NANS (GET_MODE (trueop0))
4318 || code == UNEQ || code == UNLE || code == UNGE
4319 || ((code == LT || code == GT || code == LTGT)
4320 && ! HONOR_SNANS (GET_MODE (trueop0))))
4321 && rtx_equal_p (trueop0, trueop1)
4322 && ! side_effects_p (trueop0))
4323 return comparison_result (code, CMP_EQ);
4325 /* If the operands are floating-point constants, see if we can fold
4326 the result. */
4327 if (GET_CODE (trueop0) == CONST_DOUBLE
4328 && GET_CODE (trueop1) == CONST_DOUBLE
4329 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4331 REAL_VALUE_TYPE d0, d1;
4333 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4334 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4336 /* Comparisons are unordered iff at least one of the values is NaN. */
4337 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4338 switch (code)
4340 case UNEQ:
4341 case UNLT:
4342 case UNGT:
4343 case UNLE:
4344 case UNGE:
4345 case NE:
4346 case UNORDERED:
4347 return const_true_rtx;
4348 case EQ:
4349 case LT:
4350 case GT:
4351 case LE:
4352 case GE:
4353 case LTGT:
4354 case ORDERED:
4355 return const0_rtx;
4356 default:
4357 return 0;
4360 return comparison_result (code,
4361 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4362 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4365 /* Otherwise, see if the operands are both integers. */
4366 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4367 && (GET_CODE (trueop0) == CONST_DOUBLE
4368 || CONST_INT_P (trueop0))
4369 && (GET_CODE (trueop1) == CONST_DOUBLE
4370 || CONST_INT_P (trueop1)))
4372 int width = GET_MODE_BITSIZE (mode);
4373 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4374 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4376 /* Get the two words comprising each integer constant. */
4377 if (GET_CODE (trueop0) == CONST_DOUBLE)
4379 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4380 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4382 else
4384 l0u = l0s = INTVAL (trueop0);
4385 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4388 if (GET_CODE (trueop1) == CONST_DOUBLE)
4390 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4391 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4393 else
4395 l1u = l1s = INTVAL (trueop1);
4396 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4399 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4400 we have to sign or zero-extend the values. */
4401 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4403 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4404 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4406 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4407 l0s |= ((HOST_WIDE_INT) (-1) << width);
4409 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4410 l1s |= ((HOST_WIDE_INT) (-1) << width);
4412 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4413 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4415 if (h0u == h1u && l0u == l1u)
4416 return comparison_result (code, CMP_EQ);
4417 else
4419 int cr;
4420 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4421 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4422 return comparison_result (code, cr);
4426 /* Optimize comparisons with upper and lower bounds. */
4427 if (SCALAR_INT_MODE_P (mode)
4428 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4429 && CONST_INT_P (trueop1))
4431 int sign;
4432 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4433 HOST_WIDE_INT val = INTVAL (trueop1);
4434 HOST_WIDE_INT mmin, mmax;
4436 if (code == GEU
4437 || code == LEU
4438 || code == GTU
4439 || code == LTU)
4440 sign = 0;
4441 else
4442 sign = 1;
4444 /* Get a reduced range if the sign bit is zero. */
4445 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4447 mmin = 0;
4448 mmax = nonzero;
4450 else
4452 rtx mmin_rtx, mmax_rtx;
4453 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4455 mmin = INTVAL (mmin_rtx);
4456 mmax = INTVAL (mmax_rtx);
4457 if (sign)
4459 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4461 mmin >>= (sign_copies - 1);
4462 mmax >>= (sign_copies - 1);
4466 switch (code)
4468 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4469 case GEU:
4470 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4471 return const_true_rtx;
4472 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4473 return const0_rtx;
4474 break;
4475 case GE:
4476 if (val <= mmin)
4477 return const_true_rtx;
4478 if (val > mmax)
4479 return const0_rtx;
4480 break;
4482 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4483 case LEU:
4484 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4485 return const_true_rtx;
4486 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4487 return const0_rtx;
4488 break;
4489 case LE:
4490 if (val >= mmax)
4491 return const_true_rtx;
4492 if (val < mmin)
4493 return const0_rtx;
4494 break;
4496 case EQ:
4497 /* x == y is always false for y out of range. */
4498 if (val < mmin || val > mmax)
4499 return const0_rtx;
4500 break;
4502 /* x > y is always false for y >= mmax, always true for y < mmin. */
4503 case GTU:
4504 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4505 return const0_rtx;
4506 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4507 return const_true_rtx;
4508 break;
4509 case GT:
4510 if (val >= mmax)
4511 return const0_rtx;
4512 if (val < mmin)
4513 return const_true_rtx;
4514 break;
4516 /* x < y is always false for y <= mmin, always true for y > mmax. */
4517 case LTU:
4518 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4519 return const0_rtx;
4520 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4521 return const_true_rtx;
4522 break;
4523 case LT:
4524 if (val <= mmin)
4525 return const0_rtx;
4526 if (val > mmax)
4527 return const_true_rtx;
4528 break;
4530 case NE:
4531 /* x != y is always true for y out of range. */
4532 if (val < mmin || val > mmax)
4533 return const_true_rtx;
4534 break;
4536 default:
4537 break;
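/* For instance, in QImode, whose values run from -128 to 127,
   (gt x (const_int 127)) can never hold and folds to const0_rtx, while
   (le x (const_int 127)) is a tautology and folds to const_true_rtx.  */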
4541 /* Optimize integer comparisons with zero. */
4542 if (trueop1 == const0_rtx)
4544 /* Some addresses are known to be nonzero. We don't know
4545 their sign, but equality comparisons are known. */
4546 if (nonzero_address_p (trueop0))
4548 if (code == EQ || code == LEU)
4549 return const0_rtx;
4550 if (code == NE || code == GTU)
4551 return const_true_rtx;
4554 /* See if the first operand is an IOR with a constant. If so, we
4555 may be able to determine the result of this comparison. */
4556 if (GET_CODE (op0) == IOR)
4558 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4559 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4561 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4562 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4563 && (INTVAL (inner_const)
4564 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4566 switch (code)
4568 case EQ:
4569 case LEU:
4570 return const0_rtx;
4571 case NE:
4572 case GTU:
4573 return const_true_rtx;
4574 case LT:
4575 case LE:
4576 if (has_sign)
4577 return const_true_rtx;
4578 break;
4579 case GT:
4580 case GE:
4581 if (has_sign)
4582 return const0_rtx;
4583 break;
4584 default:
4585 break;
4591 /* Optimize comparison of ABS with zero. */
4592 if (trueop1 == CONST0_RTX (mode)
4593 && (GET_CODE (trueop0) == ABS
4594 || (GET_CODE (trueop0) == FLOAT_EXTEND
4595 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4597 switch (code)
4599 case LT:
4600 /* Optimize abs(x) < 0.0. */
4601 if (!HONOR_SNANS (mode)
4602 && (!INTEGRAL_MODE_P (mode)
4603 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4605 if (INTEGRAL_MODE_P (mode)
4606 && (issue_strict_overflow_warning
4607 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4608 warning (OPT_Wstrict_overflow,
4609 ("assuming signed overflow does not occur when "
4610 "assuming abs (x) < 0 is false"));
4611 return const0_rtx;
4613 break;
4615 case GE:
4616 /* Optimize abs(x) >= 0.0. */
4617 if (!HONOR_NANS (mode)
4618 && (!INTEGRAL_MODE_P (mode)
4619 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4621 if (INTEGRAL_MODE_P (mode)
4622 && (issue_strict_overflow_warning
4623 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4624 warning (OPT_Wstrict_overflow,
4625 ("assuming signed overflow does not occur when "
4626 "assuming abs (x) >= 0 is true"));
4627 return const_true_rtx;
4629 break;
4631 case UNGE:
4632 /* Optimize ! (abs(x) < 0.0). */
4633 return const_true_rtx;
4635 default:
4636 break;
4640 return 0;
4643 /* Simplify CODE, an operation with result mode MODE and three operands,
4644 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4645 a constant. Return 0 if no simplification is possible. */
4648 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4649 enum machine_mode op0_mode, rtx op0, rtx op1,
4650 rtx op2)
4652 unsigned int width = GET_MODE_BITSIZE (mode);
4654 /* VOIDmode means "infinite" precision. */
4655 if (width == 0)
4656 width = HOST_BITS_PER_WIDE_INT;
4658 switch (code)
4660 case SIGN_EXTRACT:
4661 case ZERO_EXTRACT:
4662 if (CONST_INT_P (op0)
4663 && CONST_INT_P (op1)
4664 && CONST_INT_P (op2)
4665 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4666 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4668 /* Extracting a bit-field from a constant.  */
4669 HOST_WIDE_INT val = INTVAL (op0);
4671 if (BITS_BIG_ENDIAN)
4672 val >>= (GET_MODE_BITSIZE (op0_mode)
4673 - INTVAL (op2) - INTVAL (op1));
4674 else
4675 val >>= INTVAL (op2);
4677 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4679 /* First zero-extend. */
4680 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4681 /* If desired, propagate sign bit. */
4682 if (code == SIGN_EXTRACT
4683 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4684 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4687 /* Clear the bits that don't belong in our mode,
4688 unless they and our sign bit are all one.
4689 So we get either a reasonable negative value or a reasonable
4690 unsigned value for this mode. */
4691 if (width < HOST_BITS_PER_WIDE_INT
4692 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4693 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4694 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4696 return gen_int_mode (val, mode);
4698 break;
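/* As a concrete case (assuming !BITS_BIG_ENDIAN): extracting 3 bits at bit
   position 2 from (const_int 0x34) selects the field 0b101, so ZERO_EXTRACT
   yields (const_int 5) and SIGN_EXTRACT yields (const_int -3).  */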
4700 case IF_THEN_ELSE:
4701 if (CONST_INT_P (op0))
4702 return op0 != const0_rtx ? op1 : op2;
4704 /* Convert c ? a : a into "a". */
4705 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4706 return op1;
4708 /* Convert a != b ? a : b into "a". */
4709 if (GET_CODE (op0) == NE
4710 && ! side_effects_p (op0)
4711 && ! HONOR_NANS (mode)
4712 && ! HONOR_SIGNED_ZEROS (mode)
4713 && ((rtx_equal_p (XEXP (op0, 0), op1)
4714 && rtx_equal_p (XEXP (op0, 1), op2))
4715 || (rtx_equal_p (XEXP (op0, 0), op2)
4716 && rtx_equal_p (XEXP (op0, 1), op1))))
4717 return op1;
4719 /* Convert a == b ? a : b into "b". */
4720 if (GET_CODE (op0) == EQ
4721 && ! side_effects_p (op0)
4722 && ! HONOR_NANS (mode)
4723 && ! HONOR_SIGNED_ZEROS (mode)
4724 && ((rtx_equal_p (XEXP (op0, 0), op1)
4725 && rtx_equal_p (XEXP (op0, 1), op2))
4726 || (rtx_equal_p (XEXP (op0, 0), op2)
4727 && rtx_equal_p (XEXP (op0, 1), op1))))
4728 return op2;
4730 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4732 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4733 ? GET_MODE (XEXP (op0, 1))
4734 : GET_MODE (XEXP (op0, 0)));
4735 rtx temp;
4737 /* Look for happy constants in op1 and op2. */
4738 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4740 HOST_WIDE_INT t = INTVAL (op1);
4741 HOST_WIDE_INT f = INTVAL (op2);
4743 if (t == STORE_FLAG_VALUE && f == 0)
4744 code = GET_CODE (op0);
4745 else if (t == 0 && f == STORE_FLAG_VALUE)
4747 enum rtx_code tmp;
4748 tmp = reversed_comparison_code (op0, NULL_RTX);
4749 if (tmp == UNKNOWN)
4750 break;
4751 code = tmp;
4753 else
4754 break;
4756 return simplify_gen_relational (code, mode, cmp_mode,
4757 XEXP (op0, 0), XEXP (op0, 1));
4760 if (cmp_mode == VOIDmode)
4761 cmp_mode = op0_mode;
4762 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4763 cmp_mode, XEXP (op0, 0),
4764 XEXP (op0, 1));
4766 /* See if any simplifications were possible. */
4767 if (temp)
4769 if (CONST_INT_P (temp))
4770 return temp == const0_rtx ? op2 : op1;
4771 else if (temp)
4772 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4775 break;
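/* Thus (if_then_else (const_int 1) a b) is just A, (if_then_else c x x) is
   X whenever C has no side effects, and a condition whose outcome is already
   known collapses to the matching arm.  */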
4777 case VEC_MERGE:
4778 gcc_assert (GET_MODE (op0) == mode);
4779 gcc_assert (GET_MODE (op1) == mode);
4780 gcc_assert (VECTOR_MODE_P (mode));
4781 op2 = avoid_constant_pool_reference (op2);
4782 if (CONST_INT_P (op2))
4784 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4785 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4786 int mask = (1 << n_elts) - 1;
4788 if (!(INTVAL (op2) & mask))
4789 return op1;
4790 if ((INTVAL (op2) & mask) == mask)
4791 return op0;
4793 op0 = avoid_constant_pool_reference (op0);
4794 op1 = avoid_constant_pool_reference (op1);
4795 if (GET_CODE (op0) == CONST_VECTOR
4796 && GET_CODE (op1) == CONST_VECTOR)
4798 rtvec v = rtvec_alloc (n_elts);
4799 unsigned int i;
4801 for (i = 0; i < n_elts; i++)
4802 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4803 ? CONST_VECTOR_ELT (op0, i)
4804 : CONST_VECTOR_ELT (op1, i));
4805 return gen_rtx_CONST_VECTOR (mode, v);
4808 break;
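/* E.g. for a four-element vector mode only the low four mask bits matter:
   (const_int 0) selects OP1 outright, (const_int 15) selects OP0 outright,
   and when both operands are CONST_VECTORs any other constant mask builds a
   new vector taking element I from OP0 when bit I is set and from OP1
   otherwise.  */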
4810 default:
4811 gcc_unreachable ();
4814 return 0;
4817 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4818 or CONST_VECTOR,
4819 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4821 Works by unpacking OP into a collection of 8-bit values
4822 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4823 and then repacking them again for OUTERMODE. */
4825 static rtx
4826 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4827 enum machine_mode innermode, unsigned int byte)
4829 /* We support up to 512-bit values (for V8DFmode). */
4830 enum {
4831 max_bitsize = 512,
4832 value_bit = 8,
4833 value_mask = (1 << value_bit) - 1
4835 unsigned char value[max_bitsize / value_bit];
4836 int value_start;
4837 int i;
4838 int elem;
4840 int num_elem;
4841 rtx * elems;
4842 int elem_bitsize;
4843 rtx result_s;
4844 rtvec result_v = NULL;
4845 enum mode_class outer_class;
4846 enum machine_mode outer_submode;
4848 /* Some ports misuse CCmode. */
4849 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4850 return op;
4852 /* We have no way to represent a complex constant at the rtl level. */
4853 if (COMPLEX_MODE_P (outermode))
4854 return NULL_RTX;
4856 /* Unpack the value. */
4858 if (GET_CODE (op) == CONST_VECTOR)
4860 num_elem = CONST_VECTOR_NUNITS (op);
4861 elems = &CONST_VECTOR_ELT (op, 0);
4862 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4864 else
4866 num_elem = 1;
4867 elems = &op;
4868 elem_bitsize = max_bitsize;
4870 /* If this asserts, it is too complicated; reducing value_bit may help. */
4871 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4872 /* I don't know how to handle endianness of sub-units. */
4873 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4875 for (elem = 0; elem < num_elem; elem++)
4877 unsigned char * vp;
4878 rtx el = elems[elem];
4880 /* Vectors are kept in target memory order. (This is probably
4881 a mistake.) */
4883 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4884 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4885 / BITS_PER_UNIT);
4886 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4887 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4888 unsigned bytele = (subword_byte % UNITS_PER_WORD
4889 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4890 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4893 switch (GET_CODE (el))
4895 case CONST_INT:
4896 for (i = 0;
4897 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4898 i += value_bit)
4899 *vp++ = INTVAL (el) >> i;
4900 /* CONST_INTs are always logically sign-extended. */
4901 for (; i < elem_bitsize; i += value_bit)
4902 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4903 break;
4905 case CONST_DOUBLE:
4906 if (GET_MODE (el) == VOIDmode)
4908 /* If this triggers, someone should have generated a
4909 CONST_INT instead. */
4910 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4912 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4913 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4914 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4916 *vp++
4917 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4918 i += value_bit;
4920 /* It shouldn't matter what's done here, so fill it with
4921 zero. */
4922 for (; i < elem_bitsize; i += value_bit)
4923 *vp++ = 0;
4925 else
4927 long tmp[max_bitsize / 32];
4928 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4930 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4931 gcc_assert (bitsize <= elem_bitsize);
4932 gcc_assert (bitsize % value_bit == 0);
4934 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4935 GET_MODE (el));
4937 /* real_to_target produces its result in words affected by
4938 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4939 and use WORDS_BIG_ENDIAN instead; see the documentation
4940 of SUBREG in rtl.texi. */
4941 for (i = 0; i < bitsize; i += value_bit)
4943 int ibase;
4944 if (WORDS_BIG_ENDIAN)
4945 ibase = bitsize - 1 - i;
4946 else
4947 ibase = i;
4948 *vp++ = tmp[ibase / 32] >> i % 32;
4951 /* It shouldn't matter what's done here, so fill it with
4952 zero. */
4953 for (; i < elem_bitsize; i += value_bit)
4954 *vp++ = 0;
4956 break;
4958 case CONST_FIXED:
4959 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4961 for (i = 0; i < elem_bitsize; i += value_bit)
4962 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4964 else
4966 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4967 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4968 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4969 i += value_bit)
4970 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4971 >> (i - HOST_BITS_PER_WIDE_INT);
4972 for (; i < elem_bitsize; i += value_bit)
4973 *vp++ = 0;
4975 break;
4977 default:
4978 gcc_unreachable ();
4982 /* Now, pick the right byte to start with. */
4983 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4984 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4985 will already have offset 0. */
4986 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4988 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4989 - byte);
4990 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4991 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4992 byte = (subword_byte % UNITS_PER_WORD
4993 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4996 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4997 so if it's become negative it will instead be very large.) */
4998 gcc_assert (byte < GET_MODE_SIZE (innermode));
5000 /* Convert from bytes to chunks of size value_bit. */
5001 value_start = byte * (BITS_PER_UNIT / value_bit);
5003 /* Re-pack the value. */
5005 if (VECTOR_MODE_P (outermode))
5007 num_elem = GET_MODE_NUNITS (outermode);
5008 result_v = rtvec_alloc (num_elem);
5009 elems = &RTVEC_ELT (result_v, 0);
5010 outer_submode = GET_MODE_INNER (outermode);
5012 else
5014 num_elem = 1;
5015 elems = &result_s;
5016 outer_submode = outermode;
5019 outer_class = GET_MODE_CLASS (outer_submode);
5020 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5022 gcc_assert (elem_bitsize % value_bit == 0);
5023 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5025 for (elem = 0; elem < num_elem; elem++)
5027 unsigned char *vp;
5029 /* Vectors are stored in target memory order. (This is probably
5030 a mistake.) */
5032 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5033 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5034 / BITS_PER_UNIT);
5035 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5036 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5037 unsigned bytele = (subword_byte % UNITS_PER_WORD
5038 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5039 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5042 switch (outer_class)
5044 case MODE_INT:
5045 case MODE_PARTIAL_INT:
5047 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5049 for (i = 0;
5050 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5051 i += value_bit)
5052 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5053 for (; i < elem_bitsize; i += value_bit)
5054 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5055 << (i - HOST_BITS_PER_WIDE_INT));
5057 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5058 know why. */
5059 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5060 elems[elem] = gen_int_mode (lo, outer_submode);
5061 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5062 elems[elem] = immed_double_const (lo, hi, outer_submode);
5063 else
5064 return NULL_RTX;
5066 break;
5068 case MODE_FLOAT:
5069 case MODE_DECIMAL_FLOAT:
5071 REAL_VALUE_TYPE r;
5072 long tmp[max_bitsize / 32];
5074 /* real_from_target wants its input in words affected by
5075 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5076 and use WORDS_BIG_ENDIAN instead; see the documentation
5077 of SUBREG in rtl.texi. */
5078 for (i = 0; i < max_bitsize / 32; i++)
5079 tmp[i] = 0;
5080 for (i = 0; i < elem_bitsize; i += value_bit)
5082 int ibase;
5083 if (WORDS_BIG_ENDIAN)
5084 ibase = elem_bitsize - 1 - i;
5085 else
5086 ibase = i;
5087 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5090 real_from_target (&r, tmp, outer_submode);
5091 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5093 break;
5095 case MODE_FRACT:
5096 case MODE_UFRACT:
5097 case MODE_ACCUM:
5098 case MODE_UACCUM:
5100 FIXED_VALUE_TYPE f;
5101 f.data.low = 0;
5102 f.data.high = 0;
5103 f.mode = outer_submode;
5105 for (i = 0;
5106 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5107 i += value_bit)
5108 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5109 for (; i < elem_bitsize; i += value_bit)
5110 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5111 << (i - HOST_BITS_PER_WIDE_INT));
5113 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5115 break;
5117 default:
5118 gcc_unreachable ();
5121 if (VECTOR_MODE_P (outermode))
5122 return gen_rtx_CONST_VECTOR (outermode, result_v);
5123 else
5124 return result_s;
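/* On a little-endian target, for example, taking the QImode subreg at byte 0
   of the HImode constant (const_int 0x1234) unpacks the bytes 0x34 0x12 and
   repacks only the first, giving (const_int 0x34); byte 1 gives
   (const_int 0x12).  */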
5127 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5128 Return 0 if no simplifications are possible. */
5130 simplify_subreg (enum machine_mode outermode, rtx op,
5131 enum machine_mode innermode, unsigned int byte)
5133 /* Little bit of sanity checking. */
5134 gcc_assert (innermode != VOIDmode);
5135 gcc_assert (outermode != VOIDmode);
5136 gcc_assert (innermode != BLKmode);
5137 gcc_assert (outermode != BLKmode);
5139 gcc_assert (GET_MODE (op) == innermode
5140 || GET_MODE (op) == VOIDmode);
5142 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5143 gcc_assert (byte < GET_MODE_SIZE (innermode));
5145 if (outermode == innermode && !byte)
5146 return op;
5148 if (CONST_INT_P (op)
5149 || GET_CODE (op) == CONST_DOUBLE
5150 || GET_CODE (op) == CONST_FIXED
5151 || GET_CODE (op) == CONST_VECTOR)
5152 return simplify_immed_subreg (outermode, op, innermode, byte);
5154 /* Changing mode twice with SUBREG => just change it once,
5155 or not at all if changing back to op's starting mode. */
5156 if (GET_CODE (op) == SUBREG)
5158 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5159 int final_offset = byte + SUBREG_BYTE (op);
5160 rtx newx;
5162 if (outermode == innermostmode
5163 && byte == 0 && SUBREG_BYTE (op) == 0)
5164 return SUBREG_REG (op);
5166 /* The SUBREG_BYTE represents the offset, as if the value were stored
5167 in memory. The irritating exception is a paradoxical subreg, where
5168 we define SUBREG_BYTE to be 0; on big endian machines, this
5169 value should be negative. For a moment, undo this exception. */
5170 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5172 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5173 if (WORDS_BIG_ENDIAN)
5174 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5175 if (BYTES_BIG_ENDIAN)
5176 final_offset += difference % UNITS_PER_WORD;
5178 if (SUBREG_BYTE (op) == 0
5179 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5181 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5182 if (WORDS_BIG_ENDIAN)
5183 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5184 if (BYTES_BIG_ENDIAN)
5185 final_offset += difference % UNITS_PER_WORD;
5188 /* See whether resulting subreg will be paradoxical. */
5189 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5191 /* In nonparadoxical subregs we can't handle negative offsets. */
5192 if (final_offset < 0)
5193 return NULL_RTX;
5194 /* Bail out in case resulting subreg would be incorrect. */
5195 if (final_offset % GET_MODE_SIZE (outermode)
5196 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5197 return NULL_RTX;
5199 else
5201 int offset = 0;
5202 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5204 /* In a paradoxical subreg, see if we are still looking at the lower part.
5205 If so, our SUBREG_BYTE will be 0. */
5206 if (WORDS_BIG_ENDIAN)
5207 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5208 if (BYTES_BIG_ENDIAN)
5209 offset += difference % UNITS_PER_WORD;
5210 if (offset == final_offset)
5211 final_offset = 0;
5212 else
5213 return NULL_RTX;
5216 /* Recurse for further possible simplifications. */
5217 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5218 final_offset);
5219 if (newx)
5220 return newx;
5221 if (validate_subreg (outermode, innermostmode,
5222 SUBREG_REG (op), final_offset))
5224 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5225 if (SUBREG_PROMOTED_VAR_P (op)
5226 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5227 && GET_MODE_CLASS (outermode) == MODE_INT
5228 && IN_RANGE (GET_MODE_SIZE (outermode),
5229 GET_MODE_SIZE (innermode),
5230 GET_MODE_SIZE (innermostmode))
5231 && subreg_lowpart_p (newx))
5233 SUBREG_PROMOTED_VAR_P (newx) = 1;
5234 SUBREG_PROMOTED_UNSIGNED_SET
5235 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5237 return newx;
5239 return NULL_RTX;
5242 /* Merge implicit and explicit truncations. */
5244 if (GET_CODE (op) == TRUNCATE
5245 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5246 && subreg_lowpart_offset (outermode, innermode) == byte)
5247 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5248 GET_MODE (XEXP (op, 0)));
5250 /* SUBREG of a hard register => just change the register number
5251 and/or mode. If the hard register is not valid in that mode,
5252 suppress this simplification. If the hard register is the stack,
5253 frame, or argument pointer, leave this as a SUBREG. */
5255 if (REG_P (op) && HARD_REGISTER_P (op))
5257 unsigned int regno, final_regno;
5259 regno = REGNO (op);
5260 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5261 if (HARD_REGISTER_NUM_P (final_regno))
5263 rtx x;
5264 int final_offset = byte;
5266 /* Adjust offset for paradoxical subregs. */
5267 if (byte == 0
5268 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5270 int difference = (GET_MODE_SIZE (innermode)
5271 - GET_MODE_SIZE (outermode));
5272 if (WORDS_BIG_ENDIAN)
5273 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5274 if (BYTES_BIG_ENDIAN)
5275 final_offset += difference % UNITS_PER_WORD;
5278 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5280 /* Propagate the original regno. We don't have any way to specify
5281 the offset inside the original regno, so do so only for the lowpart.
5282 The information is used only by alias analysis, which cannot
5283 grok partial registers anyway. */
5285 if (subreg_lowpart_offset (outermode, innermode) == byte)
5286 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5287 return x;
5291 /* If we have a SUBREG of a register that we are replacing and we are
5292 replacing it with a MEM, make a new MEM and try replacing the
5293 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5294 or if we would be widening it. */
5296 if (MEM_P (op)
5297 && ! mode_dependent_address_p (XEXP (op, 0))
5298 /* Allow splitting of volatile memory references in case we don't
5299 have instruction to move the whole thing. */
5300 && (! MEM_VOLATILE_P (op)
5301 || ! have_insn_for (SET, innermode))
5302 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5303 return adjust_address_nv (op, outermode, byte);
5305 /* Handle complex values represented as CONCAT
5306 of real and imaginary part. */
5307 if (GET_CODE (op) == CONCAT)
5309 unsigned int part_size, final_offset;
5310 rtx part, res;
5312 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5313 if (byte < part_size)
5315 part = XEXP (op, 0);
5316 final_offset = byte;
5318 else
5320 part = XEXP (op, 1);
5321 final_offset = byte - part_size;
5324 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5325 return NULL_RTX;
5327 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5328 if (res)
5329 return res;
5330 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5331 return gen_rtx_SUBREG (outermode, part, final_offset);
5332 return NULL_RTX;
5335 /* Optimize SUBREG truncations of zero and sign extended values. */
5336 if ((GET_CODE (op) == ZERO_EXTEND
5337 || GET_CODE (op) == SIGN_EXTEND)
5338 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5340 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5342 /* If we're requesting the lowpart of a zero or sign extension,
5343 there are three possibilities. If the outermode is the same
5344 as the origmode, we can omit both the extension and the subreg.
5345 If the outermode is not larger than the origmode, we can apply
5346 the truncation without the extension. Finally, if the outermode
5347 is larger than the origmode, but both are integer modes, we
5348 can just extend to the appropriate mode. */
5349 if (bitpos == 0)
5351 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5352 if (outermode == origmode)
5353 return XEXP (op, 0);
5354 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5355 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5356 subreg_lowpart_offset (outermode,
5357 origmode));
5358 if (SCALAR_INT_MODE_P (outermode))
5359 return simplify_gen_unary (GET_CODE (op), outermode,
5360 XEXP (op, 0), origmode);
5363 /* A SUBREG resulting from a zero extension may fold to zero if
5364 it extracts bits higher than the ZERO_EXTEND's source provides. */
5365 if (GET_CODE (op) == ZERO_EXTEND
5366 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5367 return CONST0_RTX (outermode);
5370 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5371 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5372 the outer subreg is effectively a truncation to the original mode. */
5373 if ((GET_CODE (op) == LSHIFTRT
5374 || GET_CODE (op) == ASHIFTRT)
5375 && SCALAR_INT_MODE_P (outermode)
5376 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5377 to avoid the possibility that an outer LSHIFTRT shifts by more
5378 than the sign extension's sign_bit_copies and introduces zeros
5379 into the high bits of the result. */
5380 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5381 && CONST_INT_P (XEXP (op, 1))
5382 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5383 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5384 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5385 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5386 return simplify_gen_binary (ASHIFTRT, outermode,
5387 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5389 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5390 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5391 the outer subreg is effectively a truncation to the original mode. */
5392 if ((GET_CODE (op) == LSHIFTRT
5393 || GET_CODE (op) == ASHIFTRT)
5394 && SCALAR_INT_MODE_P (outermode)
5395 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5396 && CONST_INT_P (XEXP (op, 1))
5397 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5398 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5399 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5400 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5401 return simplify_gen_binary (LSHIFTRT, outermode,
5402 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5404 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5405 (ashift:QI (x:QI) C), where C is a suitable small constant and
5406 the outer subreg is effectively a truncation to the original mode. */
5407 if (GET_CODE (op) == ASHIFT
5408 && SCALAR_INT_MODE_P (outermode)
5409 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5410 && CONST_INT_P (XEXP (op, 1))
5411 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5412 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5413 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5414 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5415 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5416 return simplify_gen_binary (ASHIFT, outermode,
5417 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5419 /* Recognize a word extraction from a multi-word subreg. */
5420 if ((GET_CODE (op) == LSHIFTRT
5421 || GET_CODE (op) == ASHIFTRT)
5422 && SCALAR_INT_MODE_P (outermode)
5423 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5424 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5425 && CONST_INT_P (XEXP (op, 1))
5426 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5427 && INTVAL (XEXP (op, 1)) >= 0
5428 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5429 && byte == subreg_lowpart_offset (outermode, innermode))
5431 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5432 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5433 (WORDS_BIG_ENDIAN
5434 ? byte - shifted_bytes
5435 : byte + shifted_bytes));
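/* E.g. on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI x (const_int 32)) 0) is just the high word of X
   and becomes (subreg:SI x 4).  */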
5438 return NULL_RTX;
5441 /* Make a SUBREG operation or equivalent if it folds. */
5444 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5445 enum machine_mode innermode, unsigned int byte)
5447 rtx newx;
5449 newx = simplify_subreg (outermode, op, innermode, byte);
5450 if (newx)
5451 return newx;
5453 if (GET_CODE (op) == SUBREG
5454 || GET_CODE (op) == CONCAT
5455 || GET_MODE (op) == VOIDmode)
5456 return NULL_RTX;
5458 if (validate_subreg (outermode, innermode, op, byte))
5459 return gen_rtx_SUBREG (outermode, op, byte);
5461 return NULL_RTX;
5464 /* Simplify X, an rtx expression.
5466 Return the simplified expression or NULL if no simplifications
5467 were possible.
5469 This is the preferred entry point into the simplification routines;
5470 however, we still allow passes to call the more specific routines.
5472 Right now GCC has three (yes, three) major bodies of RTL simplification
5473 code that need to be unified.
5475 1. fold_rtx in cse.c. This code uses various CSE specific
5476 information to aid in RTL simplification.
5478 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5479 it uses combine specific information to aid in RTL
5480 simplification.
5482 3. The routines in this file.
5485 Long term we want to only have one body of simplification code; to
5486 get to that state I recommend the following steps:
5488 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5489 which do not depend on pass-specific state into these routines.
5491 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5492 use this routine whenever possible.
5494 3. Allow for pass dependent state to be provided to these
5495 routines and add simplifications based on the pass dependent
5496 state. Remove code from cse.c & combine.c that becomes
5497 redundant/dead.
5499 It will take time, but ultimately the compiler will be easier to
5500 maintain and improve. It's totally silly that when we add a
5501 simplification it needs to be added to 4 places (3 for RTL
5502 simplification and 1 for tree simplification). */
5505 simplify_rtx (const_rtx x)
5507 const enum rtx_code code = GET_CODE (x);
5508 const enum machine_mode mode = GET_MODE (x);
5510 switch (GET_RTX_CLASS (code))
5512 case RTX_UNARY:
5513 return simplify_unary_operation (code, mode,
5514 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5515 case RTX_COMM_ARITH:
5516 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5517 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5519 /* Fall through.... */
5521 case RTX_BIN_ARITH:
5522 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5524 case RTX_TERNARY:
5525 case RTX_BITFIELD_OPS:
5526 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5527 XEXP (x, 0), XEXP (x, 1),
5528 XEXP (x, 2));
5530 case RTX_COMPARE:
5531 case RTX_COMM_COMPARE:
5532 return simplify_relational_operation (code, mode,
5533 ((GET_MODE (XEXP (x, 0))
5534 != VOIDmode)
5535 ? GET_MODE (XEXP (x, 0))
5536 : GET_MODE (XEXP (x, 1))),
5537 XEXP (x, 0),
5538 XEXP (x, 1));
5540 case RTX_EXTRA:
5541 if (code == SUBREG)
5542 return simplify_subreg (mode, SUBREG_REG (x),
5543 GET_MODE (SUBREG_REG (x)),
5544 SUBREG_BYTE (x));
5545 break;
5547 case RTX_OBJ:
5548 if (code == LO_SUM)
5550 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5551 if (GET_CODE (XEXP (x, 0)) == HIGH
5552 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5553 return XEXP (x, 1);
5555 break;
5557 default:
5558 break;
5560 return NULL;
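
/* A minimal usage sketch (the helper below is hypothetical and not part of
   the original interface): callers generally keep the original expression
   when simplify_rtx finds nothing better, since it returns NULL in that
   case.  */

static rtx
canonicalize_or_keep (rtx x)
{
  /* simplify_rtx returns NULL when no simplification applies, so fall back
     to the expression we were given.  */
  rtx simplified = simplify_rtx (x);
  return simplified ? simplified : x;
}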