gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
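/* For example, when a CONST_INT is widened into a (low, high) pair,
   "hv = HWI_SIGN_EXTEND (lv)" fills the high word with all ones if LV
   is negative as a signed HOST_WIDE_INT, and with zero otherwise.  */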
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 addr = XEXP (x, 0);
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
195 else
196 return c;
199 return x;
202 /* Return true if X is a MEM referencing the constant pool. */
204 bool
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 rtx tem;
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 rtx tem;
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
273 if (x == old_rtx)
274 return new_rtx;
276 switch (GET_RTX_CLASS (code))
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
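/* If the original operand 0 had VOIDmode (e.g. it was a constant),
   take the mode from the simplified replacement instead.  */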
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
330 break;
332 case RTX_OBJ:
333 if (code == MEM)
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
358 break;
360 default:
361 break;
363 return x;
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
373 rtx trueop, tem;
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
393 rtx temp;
395 switch (code)
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
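/* Likewise for (not (subreg (ashift 1 X) 0)) where the SUBREG takes
   the lowpart of a wider shift: form (rotate ~1 X) in the inner mode
   and then take its lowpart.  */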
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
503 break;
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
605 else if (STORE_FLAG_VALUE == -1)
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
616 break;
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes; we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe.
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
692 (float_truncate:DF (float_extend:XF foo:SF))
693 = (float_extend:SF foo:DF). */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
725 is (float_truncate:SF x). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
736 /* (float_extend (float_extend x)) is (float_extend x)
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.
740 */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
751 break;
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
778 break;
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
858 default:
859 break;
862 return 0;
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
868 rtx
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
872 unsigned int width = GET_MODE_BITSIZE (mode);
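/* If the operand of a VEC_DUPLICATE is itself constant (a CONST_INT,
   CONST_DOUBLE or narrower CONST_VECTOR), the result folds to a
   CONST_VECTOR repeating the element(s) across MODE.  */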
874 if (code == VEC_DUPLICATE)
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
907 return gen_rtx_CONST_VECTOR (mode, v);
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
931 return gen_rtx_CONST_VECTOR (mode, v);
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
965 if (op_mode == VOIDmode)
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
973 ;
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
988 switch (code)
990 case NOT:
991 val = ~ arg0;
992 break;
994 case NEG:
995 val = - arg0;
996 break;
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
1007 break;
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1012 ;
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1015 break;
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
1035 break;
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
1045 case TRUNCATE:
1046 val = arg0;
1047 break;
1049 case ZERO_EXTEND:
1050 /* When zero-extending a CONST_INT, we need to know its
1051 original mode. */
1052 gcc_assert (op_mode != VOIDmode);
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1063 else
1064 return 0;
1065 break;
1067 case SIGN_EXTEND:
1068 if (op_mode == VOIDmode)
1069 op_mode = mode;
1070 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1072 /* If we were really extending the mode,
1073 we would have to distinguish between zero-extension
1074 and sign-extension. */
1075 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1076 val = arg0;
1078 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1081 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1082 if (val
1083 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1084 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1086 else
1087 return 0;
1088 break;
1090 case SQRT:
1091 case FLOAT_EXTEND:
1092 case FLOAT_TRUNCATE:
1093 case SS_TRUNCATE:
1094 case US_TRUNCATE:
1095 return 0;
1097 default:
1098 gcc_unreachable ();
1101 return gen_int_mode (val, mode);
1104 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1105 for a DImode operation on a CONST_INT. */
1106 else if (GET_MODE (op) == VOIDmode
1107 && width <= HOST_BITS_PER_WIDE_INT * 2
1108 && (GET_CODE (op) == CONST_DOUBLE
1109 || GET_CODE (op) == CONST_INT))
1111 unsigned HOST_WIDE_INT l1, lv;
1112 HOST_WIDE_INT h1, hv;
1114 if (GET_CODE (op) == CONST_DOUBLE)
1115 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1116 else
1117 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1119 switch (code)
1121 case NOT:
1122 lv = ~ l1;
1123 hv = ~ h1;
1124 break;
1126 case NEG:
1127 neg_double (l1, h1, &lv, &hv);
1128 break;
1130 case ABS:
1131 if (h1 < 0)
1132 neg_double (l1, h1, &lv, &hv);
1133 else
1134 lv = l1, hv = h1;
1135 break;
1137 case FFS:
1138 hv = 0;
1139 if (l1 == 0)
1141 if (h1 == 0)
1142 lv = 0;
1143 else
1144 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1146 else
1147 lv = exact_log2 (l1 & -l1) + 1;
1148 break;
1150 case CLZ:
1151 hv = 0;
1152 if (h1 != 0)
1153 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1154 - HOST_BITS_PER_WIDE_INT;
1155 else if (l1 != 0)
1156 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1157 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1158 lv = GET_MODE_BITSIZE (mode);
1159 break;
1161 case CTZ:
1162 hv = 0;
1163 if (l1 != 0)
1164 lv = exact_log2 (l1 & -l1);
1165 else if (h1 != 0)
1166 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1167 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1168 lv = GET_MODE_BITSIZE (mode);
1169 break;
1171 case POPCOUNT:
1172 hv = 0;
1173 lv = 0;
1174 while (l1)
1175 lv++, l1 &= l1 - 1;
1176 while (h1)
1177 lv++, h1 &= h1 - 1;
1178 break;
1180 case PARITY:
1181 hv = 0;
1182 lv = 0;
1183 while (l1)
1184 lv++, l1 &= l1 - 1;
1185 while (h1)
1186 lv++, h1 &= h1 - 1;
1187 lv &= 1;
1188 break;
1190 case TRUNCATE:
1191 /* This is just a change-of-mode, so do nothing. */
1192 lv = l1, hv = h1;
1193 break;
1195 case ZERO_EXTEND:
1196 gcc_assert (op_mode != VOIDmode);
1198 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1199 return 0;
1201 hv = 0;
1202 lv = l1 & GET_MODE_MASK (op_mode);
1203 break;
1205 case SIGN_EXTEND:
1206 if (op_mode == VOIDmode
1207 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1208 return 0;
1209 else
1211 lv = l1 & GET_MODE_MASK (op_mode);
1212 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1213 && (lv & ((HOST_WIDE_INT) 1
1214 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1215 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1217 hv = HWI_SIGN_EXTEND (lv);
1219 break;
1221 case SQRT:
1222 return 0;
1224 default:
1225 return 0;
1228 return immed_double_const (lv, hv, mode);
1231 else if (GET_CODE (op) == CONST_DOUBLE
1232 && SCALAR_FLOAT_MODE_P (mode))
1234 REAL_VALUE_TYPE d, t;
1235 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1237 switch (code)
1239 case SQRT:
1240 if (HONOR_SNANS (mode) && real_isnan (&d))
1241 return 0;
1242 real_sqrt (&t, mode, &d);
1243 d = t;
1244 break;
1245 case ABS:
1246 d = REAL_VALUE_ABS (d);
1247 break;
1248 case NEG:
1249 d = REAL_VALUE_NEGATE (d);
1250 break;
1251 case FLOAT_TRUNCATE:
1252 d = real_value_truncate (mode, d);
1253 break;
1254 case FLOAT_EXTEND:
1255 /* All this does is change the mode. */
1256 break;
1257 case FIX:
1258 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1259 break;
1260 case NOT:
1262 long tmp[4];
1263 int i;
1265 real_to_target (tmp, &d, GET_MODE (op));
1266 for (i = 0; i < 4; i++)
1267 tmp[i] = ~tmp[i];
1268 real_from_target (&d, tmp, mode);
1269 break;
1271 default:
1272 gcc_unreachable ();
1274 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1277 else if (GET_CODE (op) == CONST_DOUBLE
1278 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1279 && GET_MODE_CLASS (mode) == MODE_INT
1280 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1282 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1283 operators are intentionally left unspecified (to ease implementation
1284 by target backends), for consistency, this routine implements the
1285 same semantics for constant folding as used by the middle-end. */
1287 /* This was formerly used only for non-IEEE float.
1288 eggert@twinsun.com says it is safe for IEEE also. */
1289 HOST_WIDE_INT xh, xl, th, tl;
1290 REAL_VALUE_TYPE x, t;
1291 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1292 switch (code)
1294 case FIX:
1295 if (REAL_VALUE_ISNAN (x))
1296 return const0_rtx;
1298 /* Test against the signed upper bound. */
1299 if (width > HOST_BITS_PER_WIDE_INT)
1301 th = ((unsigned HOST_WIDE_INT) 1
1302 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1303 tl = -1;
1305 else
1307 th = 0;
1308 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1310 real_from_integer (&t, VOIDmode, tl, th, 0);
1311 if (REAL_VALUES_LESS (t, x))
1313 xh = th;
1314 xl = tl;
1315 break;
1318 /* Test against the signed lower bound. */
1319 if (width > HOST_BITS_PER_WIDE_INT)
1321 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1322 tl = 0;
1324 else
1326 th = -1;
1327 tl = (HOST_WIDE_INT) -1 << (width - 1);
1329 real_from_integer (&t, VOIDmode, tl, th, 0);
1330 if (REAL_VALUES_LESS (x, t))
1332 xh = th;
1333 xl = tl;
1334 break;
1336 REAL_VALUE_TO_INT (&xl, &xh, x);
1337 break;
1339 case UNSIGNED_FIX:
1340 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1341 return const0_rtx;
1343 /* Test against the unsigned upper bound. */
1344 if (width == 2*HOST_BITS_PER_WIDE_INT)
1346 th = -1;
1347 tl = -1;
1349 else if (width >= HOST_BITS_PER_WIDE_INT)
1351 th = ((unsigned HOST_WIDE_INT) 1
1352 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1353 tl = -1;
1355 else
1357 th = 0;
1358 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1360 real_from_integer (&t, VOIDmode, tl, th, 1);
1361 if (REAL_VALUES_LESS (t, x))
1363 xh = th;
1364 xl = tl;
1365 break;
1368 REAL_VALUE_TO_INT (&xl, &xh, x);
1369 break;
1371 default:
1372 gcc_unreachable ();
1374 return immed_double_const (xl, xh, mode);
1377 return NULL_RTX;
1380 /* Subroutine of simplify_binary_operation to simplify a commutative,
1381 associative binary operation CODE with result mode MODE, operating
1382 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1383 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1384 canonicalization is possible. */
1386 static rtx
1387 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1388 rtx op0, rtx op1)
1390 rtx tem;
1392 /* Linearize the operator to the left. */
1393 if (GET_CODE (op1) == code)
1395 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1396 if (GET_CODE (op0) == code)
1398 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1399 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1402 /* "a op (b op c)" becomes "(b op c) op a". */
1403 if (! swap_commutative_operands_p (op1, op0))
1404 return simplify_gen_binary (code, mode, op1, op0);
1406 tem = op0;
1407 op0 = op1;
1408 op1 = tem;
1411 if (GET_CODE (op0) == code)
1413 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1414 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1416 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1417 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1420 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1421 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1422 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1423 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1424 if (tem != 0)
1425 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1427 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1428 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1429 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1430 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1431 if (tem != 0)
1432 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1435 return 0;
1439 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1440 and OP1. Return 0 if no simplification is possible.
1442 Don't use this for relational operations such as EQ or LT.
1443 Use simplify_relational_operation instead. */
1444 rtx
1445 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1446 rtx op0, rtx op1)
1448 rtx trueop0, trueop1;
1449 rtx tem;
1451 /* Relational operations don't work here. We must know the mode
1452 of the operands in order to do the comparison correctly.
1453 Assuming a full word can give incorrect results.
1454 Consider comparing 128 with -128 in QImode. */
1455 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1456 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1458 /* Make sure the constant is second. */
1459 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1460 && swap_commutative_operands_p (op0, op1))
1462 tem = op0, op0 = op1, op1 = tem;
1465 trueop0 = avoid_constant_pool_reference (op0);
1466 trueop1 = avoid_constant_pool_reference (op1);
1468 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1469 if (tem)
1470 return tem;
1471 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1474 static rtx
1475 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1476 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1478 rtx tem, reversed, opleft, opright;
1479 HOST_WIDE_INT val;
1480 unsigned int width = GET_MODE_BITSIZE (mode);
1482 /* Even if we can't compute a constant result,
1483 there are some cases worth simplifying. */
1485 switch (code)
1487 case PLUS:
1488 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1489 when x is NaN, infinite, or finite and nonzero. They aren't
1490 when x is -0 and the rounding mode is not towards -infinity,
1491 since (-0) + 0 is then 0. */
1492 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1493 return op0;
1495 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1496 transformations are safe even for IEEE. */
1497 if (GET_CODE (op0) == NEG)
1498 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1499 else if (GET_CODE (op1) == NEG)
1500 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1502 /* (~a) + 1 -> -a */
1503 if (INTEGRAL_MODE_P (mode)
1504 && GET_CODE (op0) == NOT
1505 && trueop1 == const1_rtx)
1506 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1508 /* Handle both-operands-constant cases. We can only add
1509 CONST_INTs to constants since the sum of relocatable symbols
1510 can't be handled by most assemblers. Don't add CONST_INT
1511 to CONST_INT since overflow won't be computed properly if wider
1512 than HOST_BITS_PER_WIDE_INT. */
1514 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1515 && GET_CODE (op1) == CONST_INT)
1516 return plus_constant (op0, INTVAL (op1));
1517 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1518 && GET_CODE (op0) == CONST_INT)
1519 return plus_constant (op1, INTVAL (op0));
1521 /* See if this is something like X * C - X or vice versa or
1522 if the multiplication is written as a shift. If so, we can
1523 distribute and make a new multiply, shift, or maybe just
1524 have X (if C is 2 in the example above). But don't make
1525 something more expensive than we had before. */
1527 if (SCALAR_INT_MODE_P (mode))
1529 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1530 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1531 rtx lhs = op0, rhs = op1;
1533 if (GET_CODE (lhs) == NEG)
1535 coeff0l = -1;
1536 coeff0h = -1;
1537 lhs = XEXP (lhs, 0);
1539 else if (GET_CODE (lhs) == MULT
1540 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1542 coeff0l = INTVAL (XEXP (lhs, 1));
1543 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1544 lhs = XEXP (lhs, 0);
1546 else if (GET_CODE (lhs) == ASHIFT
1547 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1548 && INTVAL (XEXP (lhs, 1)) >= 0
1549 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1551 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1552 coeff0h = 0;
1553 lhs = XEXP (lhs, 0);
1556 if (GET_CODE (rhs) == NEG)
1558 coeff1l = -1;
1559 coeff1h = -1;
1560 rhs = XEXP (rhs, 0);
1562 else if (GET_CODE (rhs) == MULT
1563 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1565 coeff1l = INTVAL (XEXP (rhs, 1));
1566 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1567 rhs = XEXP (rhs, 0);
1569 else if (GET_CODE (rhs) == ASHIFT
1570 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1571 && INTVAL (XEXP (rhs, 1)) >= 0
1572 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1574 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1575 coeff1h = 0;
1576 rhs = XEXP (rhs, 0);
1579 if (rtx_equal_p (lhs, rhs))
1581 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1582 rtx coeff;
1583 unsigned HOST_WIDE_INT l;
1584 HOST_WIDE_INT h;
1586 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1587 coeff = immed_double_const (l, h, mode);
1589 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1590 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1591 ? tem : 0;
1595 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1596 if ((GET_CODE (op1) == CONST_INT
1597 || GET_CODE (op1) == CONST_DOUBLE)
1598 && GET_CODE (op0) == XOR
1599 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1600 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1601 && mode_signbit_p (mode, op1))
1602 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1603 simplify_gen_binary (XOR, mode, op1,
1604 XEXP (op0, 1)));
1606 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1607 if (GET_CODE (op0) == MULT
1608 && GET_CODE (XEXP (op0, 0)) == NEG)
1610 rtx in1, in2;
1612 in1 = XEXP (XEXP (op0, 0), 0);
1613 in2 = XEXP (op0, 1);
1614 return simplify_gen_binary (MINUS, mode, op1,
1615 simplify_gen_binary (MULT, mode,
1616 in1, in2));
1619 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1620 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1621 is 1. */
1622 if (COMPARISON_P (op0)
1623 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1624 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1625 && (reversed = reversed_comparison (op0, mode)))
1626 return
1627 simplify_gen_unary (NEG, mode, reversed, mode);
1629 /* If one of the operands is a PLUS or a MINUS, see if we can
1630 simplify this by the associative law.
1631 Don't use the associative law for floating point.
1632 The inaccuracy makes it nonassociative,
1633 and subtle programs can break if operations are associated. */
1635 if (INTEGRAL_MODE_P (mode)
1636 && (plus_minus_operand_p (op0)
1637 || plus_minus_operand_p (op1))
1638 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1639 return tem;
1641 /* Reassociate floating point addition only when the user
1642 specifies unsafe math optimizations. */
1643 if (FLOAT_MODE_P (mode)
1644 && flag_unsafe_math_optimizations)
1646 tem = simplify_associative_operation (code, mode, op0, op1);
1647 if (tem)
1648 return tem;
1650 break;
1652 case COMPARE:
1653 #ifdef HAVE_cc0
1654 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1655 using cc0, in which case we want to leave it as a COMPARE
1656 so we can distinguish it from a register-register-copy.
1658 In IEEE floating point, x-0 is not the same as x. */
1660 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1661 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1662 && trueop1 == CONST0_RTX (mode))
1663 return op0;
1664 #endif
1666 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1667 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1668 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1669 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1671 rtx xop00 = XEXP (op0, 0);
1672 rtx xop10 = XEXP (op1, 0);
1674 #ifdef HAVE_cc0
1675 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1676 #else
1677 if (REG_P (xop00) && REG_P (xop10)
1678 && GET_MODE (xop00) == GET_MODE (xop10)
1679 && REGNO (xop00) == REGNO (xop10)
1680 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1681 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1682 #endif
1683 return xop00;
1685 break;
1687 case MINUS:
1688 /* We can't assume x-x is 0 even with non-IEEE floating point,
1689 but since it is zero except in very strange circumstances, we
1690 will treat it as zero with -funsafe-math-optimizations. */
1691 if (rtx_equal_p (trueop0, trueop1)
1692 && ! side_effects_p (op0)
1693 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1694 return CONST0_RTX (mode);
1696 /* Change subtraction from zero into negation. (0 - x) is the
1697 same as -x when x is NaN, infinite, or finite and nonzero.
1698 But if the mode has signed zeros, and does not round towards
1699 -infinity, then 0 - 0 is 0, not -0. */
1700 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1701 return simplify_gen_unary (NEG, mode, op1, mode);
1703 /* (-1 - a) is ~a. */
1704 if (trueop0 == constm1_rtx)
1705 return simplify_gen_unary (NOT, mode, op1, mode);
1707 /* Subtracting 0 has no effect unless the mode has signed zeros
1708 and supports rounding towards -infinity. In such a case,
1709 0 - 0 is -0. */
1710 if (!(HONOR_SIGNED_ZEROS (mode)
1711 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1712 && trueop1 == CONST0_RTX (mode))
1713 return op0;
1715 /* See if this is something like X * C - X or vice versa or
1716 if the multiplication is written as a shift. If so, we can
1717 distribute and make a new multiply, shift, or maybe just
1718 have X (if C is 2 in the example above). But don't make
1719 something more expensive than we had before. */
1721 if (SCALAR_INT_MODE_P (mode))
1723 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1724 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1725 rtx lhs = op0, rhs = op1;
1727 if (GET_CODE (lhs) == NEG)
1729 coeff0l = -1;
1730 coeff0h = -1;
1731 lhs = XEXP (lhs, 0);
1733 else if (GET_CODE (lhs) == MULT
1734 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1736 coeff0l = INTVAL (XEXP (lhs, 1));
1737 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1738 lhs = XEXP (lhs, 0);
1740 else if (GET_CODE (lhs) == ASHIFT
1741 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1742 && INTVAL (XEXP (lhs, 1)) >= 0
1743 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1745 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1746 coeff0h = 0;
1747 lhs = XEXP (lhs, 0);
1750 if (GET_CODE (rhs) == NEG)
1752 negcoeff1l = 1;
1753 negcoeff1h = 0;
1754 rhs = XEXP (rhs, 0);
1756 else if (GET_CODE (rhs) == MULT
1757 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1759 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1760 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1761 rhs = XEXP (rhs, 0);
1763 else if (GET_CODE (rhs) == ASHIFT
1764 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1765 && INTVAL (XEXP (rhs, 1)) >= 0
1766 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1768 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1769 negcoeff1h = -1;
1770 rhs = XEXP (rhs, 0);
1773 if (rtx_equal_p (lhs, rhs))
1775 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1776 rtx coeff;
1777 unsigned HOST_WIDE_INT l;
1778 HOST_WIDE_INT h;
1780 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1781 coeff = immed_double_const (l, h, mode);
1783 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1784 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1785 ? tem : 0;
1789 /* (a - (-b)) -> (a + b). True even for IEEE. */
1790 if (GET_CODE (op1) == NEG)
1791 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1793 /* (-x - c) may be simplified as (-c - x). */
1794 if (GET_CODE (op0) == NEG
1795 && (GET_CODE (op1) == CONST_INT
1796 || GET_CODE (op1) == CONST_DOUBLE))
1798 tem = simplify_unary_operation (NEG, mode, op1, mode);
1799 if (tem)
1800 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1803 /* Don't let a relocatable value get a negative coeff. */
1804 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1805 return simplify_gen_binary (PLUS, mode,
1806 op0,
1807 neg_const_int (mode, op1));
1809 /* (x - (x & y)) -> (x & ~y) */
1810 if (GET_CODE (op1) == AND)
1812 if (rtx_equal_p (op0, XEXP (op1, 0)))
1814 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1815 GET_MODE (XEXP (op1, 1)));
1816 return simplify_gen_binary (AND, mode, op0, tem);
1818 if (rtx_equal_p (op0, XEXP (op1, 1)))
1820 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1821 GET_MODE (XEXP (op1, 0)));
1822 return simplify_gen_binary (AND, mode, op0, tem);
1826 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1827 by reversing the comparison code if valid. */
1828 if (STORE_FLAG_VALUE == 1
1829 && trueop0 == const1_rtx
1830 && COMPARISON_P (op1)
1831 && (reversed = reversed_comparison (op1, mode)))
1832 return reversed;
1834 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1835 if (GET_CODE (op1) == MULT
1836 && GET_CODE (XEXP (op1, 0)) == NEG)
1838 rtx in1, in2;
1840 in1 = XEXP (XEXP (op1, 0), 0);
1841 in2 = XEXP (op1, 1);
1842 return simplify_gen_binary (PLUS, mode,
1843 simplify_gen_binary (MULT, mode,
1844 in1, in2),
1845 op0);
1848 /* Canonicalize (minus (neg A) (mult B C)) to
1849 (minus (mult (neg B) C) A). */
1850 if (GET_CODE (op1) == MULT
1851 && GET_CODE (op0) == NEG)
1853 rtx in1, in2;
1855 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1856 in2 = XEXP (op1, 1);
1857 return simplify_gen_binary (MINUS, mode,
1858 simplify_gen_binary (MULT, mode,
1859 in1, in2),
1860 XEXP (op0, 0));
1863 /* If one of the operands is a PLUS or a MINUS, see if we can
1864 simplify this by the associative law. This will, for example,
1865 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1866 Don't use the associative law for floating point.
1867 The inaccuracy makes it nonassociative,
1868 and subtle programs can break if operations are associated. */
1870 if (INTEGRAL_MODE_P (mode)
1871 && (plus_minus_operand_p (op0)
1872 || plus_minus_operand_p (op1))
1873 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1874 return tem;
1875 break;
1877 case MULT:
1878 if (trueop1 == constm1_rtx)
1879 return simplify_gen_unary (NEG, mode, op0, mode);
1881 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1882 x is NaN, since x * 0 is then also NaN. Nor is it valid
1883 when the mode has signed zeros, since multiplying a negative
1884 number by 0 will give -0, not 0. */
1885 if (!HONOR_NANS (mode)
1886 && !HONOR_SIGNED_ZEROS (mode)
1887 && trueop1 == CONST0_RTX (mode)
1888 && ! side_effects_p (op0))
1889 return op1;
1891 /* In IEEE floating point, x*1 is not equivalent to x for
1892 signalling NaNs. */
1893 if (!HONOR_SNANS (mode)
1894 && trueop1 == CONST1_RTX (mode))
1895 return op0;
1897 /* Convert multiply by constant power of two into shift unless
1898 we are still generating RTL. This test is a kludge. */
1899 if (GET_CODE (trueop1) == CONST_INT
1900 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1901 /* If the mode is larger than the host word size, and the
1902 uppermost bit is set, then this isn't a power of two due
1903 to implicit sign extension. */
1904 && (width <= HOST_BITS_PER_WIDE_INT
1905 || val != HOST_BITS_PER_WIDE_INT - 1))
1906 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1908 /* Likewise for multipliers wider than a word. */
1909 else if (GET_CODE (trueop1) == CONST_DOUBLE
1910 && (GET_MODE (trueop1) == VOIDmode
1911 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1912 && GET_MODE (op0) == mode
1913 && CONST_DOUBLE_LOW (trueop1) == 0
1914 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1915 return simplify_gen_binary (ASHIFT, mode, op0,
1916 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1918 /* x*2 is x+x and x*(-1) is -x */
1919 if (GET_CODE (trueop1) == CONST_DOUBLE
1920 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1921 && GET_MODE (op0) == mode)
1923 REAL_VALUE_TYPE d;
1924 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1926 if (REAL_VALUES_EQUAL (d, dconst2))
1927 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1929 if (REAL_VALUES_EQUAL (d, dconstm1))
1930 return simplify_gen_unary (NEG, mode, op0, mode);
1933 /* Reassociate multiplication, but for floating point MULTs
1934 only when the user specifies unsafe math optimizations. */
1935 if (! FLOAT_MODE_P (mode)
1936 || flag_unsafe_math_optimizations)
1938 tem = simplify_associative_operation (code, mode, op0, op1);
1939 if (tem)
1940 return tem;
1942 break;
1944 case IOR:
1945 if (trueop1 == const0_rtx)
1946 return op0;
1947 if (GET_CODE (trueop1) == CONST_INT
1948 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1949 == GET_MODE_MASK (mode)))
1950 return op1;
1951 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1952 return op0;
1953 /* A | (~A) -> -1 */
1954 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1955 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1956 && ! side_effects_p (op0)
1957 && SCALAR_INT_MODE_P (mode))
1958 return constm1_rtx;
1960 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1961 if (GET_CODE (op1) == CONST_INT
1962 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1963 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1964 return op1;
1966 /* Convert (A & B) | A to A. */
1967 if (GET_CODE (op0) == AND
1968 && (rtx_equal_p (XEXP (op0, 0), op1)
1969 || rtx_equal_p (XEXP (op0, 1), op1))
1970 && ! side_effects_p (XEXP (op0, 0))
1971 && ! side_effects_p (XEXP (op0, 1)))
1972 return op1;
1974 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1975 mode size to (rotate A CX). */
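      /* For example, in SImode (ior (ashift A (const_int 5))
	 (lshiftrt A (const_int 27))) becomes (rotate A (const_int 5)),
	 since 5 + 27 == 32 == GET_MODE_BITSIZE (SImode).  */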
1977 if (GET_CODE (op1) == ASHIFT
1978 || GET_CODE (op1) == SUBREG)
1980 opleft = op1;
1981 opright = op0;
1983 else
1985 opright = op1;
1986 opleft = op0;
1989 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1990 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1991 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1992 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1993 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1994 == GET_MODE_BITSIZE (mode)))
1995 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
1997 /* Same, but for ashift that has been "simplified" to a wider mode
1998 by simplify_shift_const. */
2000 if (GET_CODE (opleft) == SUBREG
2001 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2002 && GET_CODE (opright) == LSHIFTRT
2003 && GET_CODE (XEXP (opright, 0)) == SUBREG
2004 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2005 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2006 && (GET_MODE_SIZE (GET_MODE (opleft))
2007 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2008 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2009 SUBREG_REG (XEXP (opright, 0)))
2010 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2011 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2012 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2013 == GET_MODE_BITSIZE (mode)))
2014 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2015 XEXP (SUBREG_REG (opleft), 1));
2017 /* If we have (ior (and X C1) C2), simplify this by making
2018 C1 as small as possible if C1 actually changes. */
2019 if (GET_CODE (op1) == CONST_INT
2020 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2021 || INTVAL (op1) > 0)
2022 && GET_CODE (op0) == AND
2023 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2024 && GET_CODE (op1) == CONST_INT
2025 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2026 return simplify_gen_binary (IOR, mode,
2027 simplify_gen_binary
2028 (AND, mode, XEXP (op0, 0),
2029 GEN_INT (INTVAL (XEXP (op0, 1))
2030 & ~INTVAL (op1))),
2031 op1);
2033 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2034 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2035 the PLUS does not affect any of the bits in OP1: then we can do
2036 the IOR as a PLUS and we can associate. This is valid if OP1
2037 can be safely shifted left C bits. */
2038 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2039 && GET_CODE (XEXP (op0, 0)) == PLUS
2040 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2041 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2042 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2044 int count = INTVAL (XEXP (op0, 1));
2045 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2047 if (mask >> count == INTVAL (trueop1)
2048 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2049 return simplify_gen_binary (ASHIFTRT, mode,
2050 plus_constant (XEXP (op0, 0), mask),
2051 XEXP (op0, 1));
2054 tem = simplify_associative_operation (code, mode, op0, op1);
2055 if (tem)
2056 return tem;
2057 break;
2059 case XOR:
2060 if (trueop1 == const0_rtx)
2061 return op0;
2062 if (GET_CODE (trueop1) == CONST_INT
2063 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2064 == GET_MODE_MASK (mode)))
2065 return simplify_gen_unary (NOT, mode, op0, mode);
2066 if (rtx_equal_p (trueop0, trueop1)
2067 && ! side_effects_p (op0)
2068 && GET_MODE_CLASS (mode) != MODE_CC)
2069 return CONST0_RTX (mode);
2071 /* Canonicalize XOR of the most significant bit to PLUS. */
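      /* For example, in QImode (xor X (const_int -128)) becomes
	 (plus X (const_int -128)); both forms flip only the sign bit,
	 since any carry out of the top bit is discarded.  */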
2072 if ((GET_CODE (op1) == CONST_INT
2073 || GET_CODE (op1) == CONST_DOUBLE)
2074 && mode_signbit_p (mode, op1))
2075 return simplify_gen_binary (PLUS, mode, op0, op1);
2076 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2077 if ((GET_CODE (op1) == CONST_INT
2078 || GET_CODE (op1) == CONST_DOUBLE)
2079 && GET_CODE (op0) == PLUS
2080 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2081 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2082 && mode_signbit_p (mode, XEXP (op0, 1)))
2083 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2084 simplify_gen_binary (XOR, mode, op1,
2085 XEXP (op0, 1)));
2087 /* If we are XORing two things that have no bits in common,
2088 convert them into an IOR. This helps to detect rotation encoded
2089 using those methods and possibly other simplifications. */
2091 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2092 && (nonzero_bits (op0, mode)
2093 & nonzero_bits (op1, mode)) == 0)
2094 return (simplify_gen_binary (IOR, mode, op0, op1));
2096 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2097 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2098 (NOT y). */
2100 int num_negated = 0;
2102 if (GET_CODE (op0) == NOT)
2103 num_negated++, op0 = XEXP (op0, 0);
2104 if (GET_CODE (op1) == NOT)
2105 num_negated++, op1 = XEXP (op1, 0);
2107 if (num_negated == 2)
2108 return simplify_gen_binary (XOR, mode, op0, op1);
2109 else if (num_negated == 1)
2110 return simplify_gen_unary (NOT, mode,
2111 simplify_gen_binary (XOR, mode, op0, op1),
2112 mode);
2115 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2116 correspond to a machine insn or result in further simplifications
2117 if B is a constant. */
2119 if (GET_CODE (op0) == AND
2120 && rtx_equal_p (XEXP (op0, 1), op1)
2121 && ! side_effects_p (op1))
2122 return simplify_gen_binary (AND, mode,
2123 simplify_gen_unary (NOT, mode,
2124 XEXP (op0, 0), mode),
2125 op1);
2127 else if (GET_CODE (op0) == AND
2128 && rtx_equal_p (XEXP (op0, 0), op1)
2129 && ! side_effects_p (op1))
2130 return simplify_gen_binary (AND, mode,
2131 simplify_gen_unary (NOT, mode,
2132 XEXP (op0, 1), mode),
2133 op1);
2135 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2136 comparison if STORE_FLAG_VALUE is 1. */
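      /* For example, (xor (eq X Y) (const_int 1)) becomes (ne X Y).  */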
2137 if (STORE_FLAG_VALUE == 1
2138 && trueop1 == const1_rtx
2139 && COMPARISON_P (op0)
2140 && (reversed = reversed_comparison (op0, mode)))
2141 return reversed;
2143 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2144 is (lt foo (const_int 0)), so we can perform the above
2145 simplification if STORE_FLAG_VALUE is 1. */
2147 if (STORE_FLAG_VALUE == 1
2148 && trueop1 == const1_rtx
2149 && GET_CODE (op0) == LSHIFTRT
2150 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2151 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2152 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2154 /* (xor (comparison foo bar) (const_int sign-bit))
2155 when STORE_FLAG_VALUE is the sign bit. */
2156 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2157 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2158 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2159 && trueop1 == const_true_rtx
2160 && COMPARISON_P (op0)
2161 && (reversed = reversed_comparison (op0, mode)))
2162 return reversed;
2164 break;
2166 tem = simplify_associative_operation (code, mode, op0, op1);
2167 if (tem)
2168 return tem;
2169 break;
2171 case AND:
2172 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2173 return trueop1;
2174 /* If we are turning off bits already known off in OP0, we need
2175 not do an AND. */
2176 if (GET_CODE (trueop1) == CONST_INT
2177 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2178 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2179 return op0;
2180 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2181 && GET_MODE_CLASS (mode) != MODE_CC)
2182 return op0;
2183 /* A & (~A) -> 0 */
2184 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2185 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2186 && ! side_effects_p (op0)
2187 && GET_MODE_CLASS (mode) != MODE_CC)
2188 return CONST0_RTX (mode);
2190 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2191 there are no nonzero bits of C outside of X's mode. */
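      /* For example, if X has QImode, (and:SI (sign_extend:SI X)
	 (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI X (const_int 0x7f))).  */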
2192 if ((GET_CODE (op0) == SIGN_EXTEND
2193 || GET_CODE (op0) == ZERO_EXTEND)
2194 && GET_CODE (trueop1) == CONST_INT
2195 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2196 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2197 & INTVAL (trueop1)) == 0)
2199 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2200 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2201 gen_int_mode (INTVAL (trueop1),
2202 imode));
2203 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2206 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2207 insn (and may simplify more). */
2208 if (GET_CODE (op0) == XOR
2209 && rtx_equal_p (XEXP (op0, 0), op1)
2210 && ! side_effects_p (op1))
2211 return simplify_gen_binary (AND, mode,
2212 simplify_gen_unary (NOT, mode,
2213 XEXP (op0, 1), mode),
2214 op1);
2216 if (GET_CODE (op0) == XOR
2217 && rtx_equal_p (XEXP (op0, 1), op1)
2218 && ! side_effects_p (op1))
2219 return simplify_gen_binary (AND, mode,
2220 simplify_gen_unary (NOT, mode,
2221 XEXP (op0, 0), mode),
2222 op1);
2224 /* Similarly for (~(A ^ B)) & A. */
2225 if (GET_CODE (op0) == NOT
2226 && GET_CODE (XEXP (op0, 0)) == XOR
2227 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2228 && ! side_effects_p (op1))
2229 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2231 if (GET_CODE (op0) == NOT
2232 && GET_CODE (XEXP (op0, 0)) == XOR
2233 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2234 && ! side_effects_p (op1))
2235 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2237 /* Convert (A | B) & A to A. */
2238 if (GET_CODE (op0) == IOR
2239 && (rtx_equal_p (XEXP (op0, 0), op1)
2240 || rtx_equal_p (XEXP (op0, 1), op1))
2241 && ! side_effects_p (XEXP (op0, 0))
2242 && ! side_effects_p (XEXP (op0, 1)))
2243 return op1;
2245 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2246 ((A & N) + B) & M -> (A + B) & M
2247 Similarly if (N & M) == 0,
2248 ((A | N) + B) & M -> (A + B) & M
2249 and for - instead of + and/or ^ instead of |. */
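      /* For example, with M == 0xff and N == 0x1ff,
	 ((A & 0x1ff) + B) & 0xff simplifies to (A + B) & 0xff, because
	 the bits masked off of A cannot affect the low eight bits of
	 the sum.  */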
2250 if (GET_CODE (trueop1) == CONST_INT
2251 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2252 && ~INTVAL (trueop1)
2253 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2254 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2256 rtx pmop[2];
2257 int which;
2259 pmop[0] = XEXP (op0, 0);
2260 pmop[1] = XEXP (op0, 1);
2262 for (which = 0; which < 2; which++)
2264 tem = pmop[which];
2265 switch (GET_CODE (tem))
2267 case AND:
2268 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2269 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2270 == INTVAL (trueop1))
2271 pmop[which] = XEXP (tem, 0);
2272 break;
2273 case IOR:
2274 case XOR:
2275 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2276 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2277 pmop[which] = XEXP (tem, 0);
2278 break;
2279 default:
2280 break;
2284 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2286 tem = simplify_gen_binary (GET_CODE (op0), mode,
2287 pmop[0], pmop[1]);
2288 return simplify_gen_binary (code, mode, tem, op1);
2291 tem = simplify_associative_operation (code, mode, op0, op1);
2292 if (tem)
2293 return tem;
2294 break;
2296 case UDIV:
2297 /* 0/x is 0 (or x&0 if x has side-effects). */
2298 if (trueop0 == CONST0_RTX (mode))
2300 if (side_effects_p (op1))
2301 return simplify_gen_binary (AND, mode, op1, trueop0);
2302 return trueop0;
2304 /* x/1 is x. */
2305 if (trueop1 == CONST1_RTX (mode))
2306 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2307 /* Convert divide by power of two into shift. */
2308 if (GET_CODE (trueop1) == CONST_INT
2309 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2310 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2311 break;
2313 case DIV:
2314 /* Handle floating point and integers separately. */
2315 if (SCALAR_FLOAT_MODE_P (mode))
2317 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2318 safe for modes with NaNs, since 0.0 / 0.0 will then be
2319 NaN rather than 0.0. Nor is it safe for modes with signed
2320 zeros, since dividing 0 by a negative number gives -0.0.  */
2321 if (trueop0 == CONST0_RTX (mode)
2322 && !HONOR_NANS (mode)
2323 && !HONOR_SIGNED_ZEROS (mode)
2324 && ! side_effects_p (op1))
2325 return op0;
2326 /* x/1.0 is x. */
2327 if (trueop1 == CONST1_RTX (mode)
2328 && !HONOR_SNANS (mode))
2329 return op0;
2331 if (GET_CODE (trueop1) == CONST_DOUBLE
2332 && trueop1 != CONST0_RTX (mode))
2334 REAL_VALUE_TYPE d;
2335 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2337 /* x/-1.0 is -x. */
2338 if (REAL_VALUES_EQUAL (d, dconstm1)
2339 && !HONOR_SNANS (mode))
2340 return simplify_gen_unary (NEG, mode, op0, mode);
2342 /* Change FP division by a constant into multiplication.
2343 Only do this with -funsafe-math-optimizations. */
2344 if (flag_unsafe_math_optimizations
2345 && !REAL_VALUES_EQUAL (d, dconst0))
2347 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2348 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2349 return simplify_gen_binary (MULT, mode, op0, tem);
2353 else
2355 /* 0/x is 0 (or x&0 if x has side-effects). */
2356 if (trueop0 == CONST0_RTX (mode))
2358 if (side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode, op1, trueop0);
2360 return trueop0;
2362 /* x/1 is x. */
2363 if (trueop1 == CONST1_RTX (mode))
2364 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2365 /* x/-1 is -x. */
2366 if (trueop1 == constm1_rtx)
2368 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2369 return simplify_gen_unary (NEG, mode, x, mode);
2372 break;
2374 case UMOD:
2375 /* 0%x is 0 (or x&0 if x has side-effects). */
2376 if (trueop0 == CONST0_RTX (mode))
2378 if (side_effects_p (op1))
2379 return simplify_gen_binary (AND, mode, op1, trueop0);
2380 return trueop0;
2382 /* x%1 is 0 (or x&0 if x has side-effects). */
2383 if (trueop1 == CONST1_RTX (mode))
2385 if (side_effects_p (op0))
2386 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2387 return CONST0_RTX (mode);
2389 /* Implement modulus by power of two as AND. */
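      /* For example, (umod X (const_int 16)) becomes
	 (and X (const_int 15)).  */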
2390 if (GET_CODE (trueop1) == CONST_INT
2391 && exact_log2 (INTVAL (trueop1)) > 0)
2392 return simplify_gen_binary (AND, mode, op0,
2393 GEN_INT (INTVAL (op1) - 1));
2394 break;
2396 case MOD:
2397 /* 0%x is 0 (or x&0 if x has side-effects). */
2398 if (trueop0 == CONST0_RTX (mode))
2400 if (side_effects_p (op1))
2401 return simplify_gen_binary (AND, mode, op1, trueop0);
2402 return trueop0;
2404 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2405 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2407 if (side_effects_p (op0))
2408 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2409 return CONST0_RTX (mode);
2411 break;
2413 case ROTATERT:
2414 case ROTATE:
2415 case ASHIFTRT:
2416 /* Rotating ~0 always results in ~0. */
2417 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2418 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2419 && ! side_effects_p (op1))
2420 return op0;
2422 /* Fall through.... */
2424 case ASHIFT:
2425 case LSHIFTRT:
2426 if (trueop1 == CONST0_RTX (mode))
2427 return op0;
2428 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2429 return op0;
2430 break;
2432 case SMIN:
2433 if (width <= HOST_BITS_PER_WIDE_INT
2434 && GET_CODE (trueop1) == CONST_INT
2435 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2436 && ! side_effects_p (op0))
2437 return op1;
2438 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2439 return op0;
2440 tem = simplify_associative_operation (code, mode, op0, op1);
2441 if (tem)
2442 return tem;
2443 break;
2445 case SMAX:
2446 if (width <= HOST_BITS_PER_WIDE_INT
2447 && GET_CODE (trueop1) == CONST_INT
2448 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2449 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2450 && ! side_effects_p (op0))
2451 return op1;
2452 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2453 return op0;
2454 tem = simplify_associative_operation (code, mode, op0, op1);
2455 if (tem)
2456 return tem;
2457 break;
2459 case UMIN:
2460 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2461 return op1;
2462 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2463 return op0;
2464 tem = simplify_associative_operation (code, mode, op0, op1);
2465 if (tem)
2466 return tem;
2467 break;
2469 case UMAX:
2470 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2471 return op1;
2472 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2473 return op0;
2474 tem = simplify_associative_operation (code, mode, op0, op1);
2475 if (tem)
2476 return tem;
2477 break;
2479 case SS_PLUS:
2480 case US_PLUS:
2481 case SS_MINUS:
2482 case US_MINUS:
2483 /* ??? There are simplifications that can be done. */
2484 return 0;
2486 case VEC_SELECT:
2487 if (!VECTOR_MODE_P (mode))
2489 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2490 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2491 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2492 gcc_assert (XVECLEN (trueop1, 0) == 1);
2493 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2495 if (GET_CODE (trueop0) == CONST_VECTOR)
2496 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2497 (trueop1, 0, 0)));
2499 else
2501 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2502 gcc_assert (GET_MODE_INNER (mode)
2503 == GET_MODE_INNER (GET_MODE (trueop0)));
2504 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2506 if (GET_CODE (trueop0) == CONST_VECTOR)
2508 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2509 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2510 rtvec v = rtvec_alloc (n_elts);
2511 unsigned int i;
2513 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2514 for (i = 0; i < n_elts; i++)
2516 rtx x = XVECEXP (trueop1, 0, i);
2518 gcc_assert (GET_CODE (x) == CONST_INT);
2519 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2520 INTVAL (x));
2523 return gen_rtx_CONST_VECTOR (mode, v);
2527 if (XVECLEN (trueop1, 0) == 1
2528 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2529 && GET_CODE (trueop0) == VEC_CONCAT)
2531 rtx vec = trueop0;
2532 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2534 /* Try to find the element in the VEC_CONCAT. */
2535 while (GET_MODE (vec) != mode
2536 && GET_CODE (vec) == VEC_CONCAT)
2538 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2539 if (offset < vec_size)
2540 vec = XEXP (vec, 0);
2541 else
2543 offset -= vec_size;
2544 vec = XEXP (vec, 1);
2546 vec = avoid_constant_pool_reference (vec);
2549 if (GET_MODE (vec) == mode)
2550 return vec;
2553 return 0;
2554 case VEC_CONCAT:
2556 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2557 ? GET_MODE (trueop0)
2558 : GET_MODE_INNER (mode));
2559 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2560 ? GET_MODE (trueop1)
2561 : GET_MODE_INNER (mode));
2563 gcc_assert (VECTOR_MODE_P (mode));
2564 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2565 == GET_MODE_SIZE (mode));
2567 if (VECTOR_MODE_P (op0_mode))
2568 gcc_assert (GET_MODE_INNER (mode)
2569 == GET_MODE_INNER (op0_mode));
2570 else
2571 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2573 if (VECTOR_MODE_P (op1_mode))
2574 gcc_assert (GET_MODE_INNER (mode)
2575 == GET_MODE_INNER (op1_mode));
2576 else
2577 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2579 if ((GET_CODE (trueop0) == CONST_VECTOR
2580 || GET_CODE (trueop0) == CONST_INT
2581 || GET_CODE (trueop0) == CONST_DOUBLE)
2582 && (GET_CODE (trueop1) == CONST_VECTOR
2583 || GET_CODE (trueop1) == CONST_INT
2584 || GET_CODE (trueop1) == CONST_DOUBLE))
2586 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2587 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2588 rtvec v = rtvec_alloc (n_elts);
2589 unsigned int i;
2590 unsigned in_n_elts = 1;
2592 if (VECTOR_MODE_P (op0_mode))
2593 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2594 for (i = 0; i < n_elts; i++)
2596 if (i < in_n_elts)
2598 if (!VECTOR_MODE_P (op0_mode))
2599 RTVEC_ELT (v, i) = trueop0;
2600 else
2601 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2603 else
2605 if (!VECTOR_MODE_P (op1_mode))
2606 RTVEC_ELT (v, i) = trueop1;
2607 else
2608 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2609 i - in_n_elts);
2613 return gen_rtx_CONST_VECTOR (mode, v);
2616 return 0;
2618 default:
2619 gcc_unreachable ();
2622 return 0;
2625 rtx
2626 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2627 rtx op0, rtx op1)
2629 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2630 HOST_WIDE_INT val;
2631 unsigned int width = GET_MODE_BITSIZE (mode);
2633 if (VECTOR_MODE_P (mode)
2634 && code != VEC_CONCAT
2635 && GET_CODE (op0) == CONST_VECTOR
2636 && GET_CODE (op1) == CONST_VECTOR)
2638 unsigned n_elts = GET_MODE_NUNITS (mode);
2639 enum machine_mode op0mode = GET_MODE (op0);
2640 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2641 enum machine_mode op1mode = GET_MODE (op1);
2642 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2643 rtvec v = rtvec_alloc (n_elts);
2644 unsigned int i;
2646 gcc_assert (op0_n_elts == n_elts);
2647 gcc_assert (op1_n_elts == n_elts);
2648 for (i = 0; i < n_elts; i++)
2650 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2651 CONST_VECTOR_ELT (op0, i),
2652 CONST_VECTOR_ELT (op1, i));
2653 if (!x)
2654 return 0;
2655 RTVEC_ELT (v, i) = x;
2658 return gen_rtx_CONST_VECTOR (mode, v);
2661 if (VECTOR_MODE_P (mode)
2662 && code == VEC_CONCAT
2663 && CONSTANT_P (op0) && CONSTANT_P (op1))
2665 unsigned n_elts = GET_MODE_NUNITS (mode);
2666 rtvec v = rtvec_alloc (n_elts);
2668 gcc_assert (n_elts >= 2);
2669 if (n_elts == 2)
2671 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2672 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2674 RTVEC_ELT (v, 0) = op0;
2675 RTVEC_ELT (v, 1) = op1;
2677 else
2679 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2680 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2681 unsigned i;
2683 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2684 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2685 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2687 for (i = 0; i < op0_n_elts; ++i)
2688 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2689 for (i = 0; i < op1_n_elts; ++i)
2690 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2693 return gen_rtx_CONST_VECTOR (mode, v);
2696 if (SCALAR_FLOAT_MODE_P (mode)
2697 && GET_CODE (op0) == CONST_DOUBLE
2698 && GET_CODE (op1) == CONST_DOUBLE
2699 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2701 if (code == AND
2702 || code == IOR
2703 || code == XOR)
2705 long tmp0[4];
2706 long tmp1[4];
2707 REAL_VALUE_TYPE r;
2708 int i;
2710 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2711 GET_MODE (op0));
2712 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2713 GET_MODE (op1));
2714 for (i = 0; i < 4; i++)
2716 switch (code)
2718 case AND:
2719 tmp0[i] &= tmp1[i];
2720 break;
2721 case IOR:
2722 tmp0[i] |= tmp1[i];
2723 break;
2724 case XOR:
2725 tmp0[i] ^= tmp1[i];
2726 break;
2727 default:
2728 gcc_unreachable ();
2731 real_from_target (&r, tmp0, mode);
2732 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2734 else
2736 REAL_VALUE_TYPE f0, f1, value, result;
2737 bool inexact;
2739 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2740 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2741 real_convert (&f0, mode, &f0);
2742 real_convert (&f1, mode, &f1);
2744 if (HONOR_SNANS (mode)
2745 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2746 return 0;
2748 if (code == DIV
2749 && REAL_VALUES_EQUAL (f1, dconst0)
2750 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2751 return 0;
2753 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2754 && flag_trapping_math
2755 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2757 int s0 = REAL_VALUE_NEGATIVE (f0);
2758 int s1 = REAL_VALUE_NEGATIVE (f1);
2760 switch (code)
2762 case PLUS:
2763 /* Inf + -Inf = NaN plus exception. */
2764 if (s0 != s1)
2765 return 0;
2766 break;
2767 case MINUS:
2768 /* Inf - Inf = NaN plus exception. */
2769 if (s0 == s1)
2770 return 0;
2771 break;
2772 case DIV:
2773 /* Inf / Inf = NaN plus exception. */
2774 return 0;
2775 default:
2776 break;
2780 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2781 && flag_trapping_math
2782 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2783 || (REAL_VALUE_ISINF (f1)
2784 && REAL_VALUES_EQUAL (f0, dconst0))))
2785 /* Inf * 0 = NaN plus exception. */
2786 return 0;
2788 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2789 &f0, &f1);
2790 real_convert (&result, mode, &value);
2792 /* Don't constant fold this floating point operation if
2793 the result has overflowed and flag_trapping_math. */
2795 if (flag_trapping_math
2796 && MODE_HAS_INFINITIES (mode)
2797 && REAL_VALUE_ISINF (result)
2798 && !REAL_VALUE_ISINF (f0)
2799 && !REAL_VALUE_ISINF (f1))
2800 /* Overflow plus exception. */
2801 return 0;
2803 /* Don't constant fold this floating point operation if the
2804 result may depend upon the run-time rounding mode and
2805 flag_rounding_math is set, or if GCC's software emulation
2806 is unable to accurately represent the result. */
2808 if ((flag_rounding_math
2809 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2810 && !flag_unsafe_math_optimizations))
2811 && (inexact || !real_identical (&result, &value)))
2812 return NULL_RTX;
2814 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2818 /* We can fold some multi-word operations. */
2819 if (GET_MODE_CLASS (mode) == MODE_INT
2820 && width == HOST_BITS_PER_WIDE_INT * 2
2821 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2822 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2824 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2825 HOST_WIDE_INT h1, h2, hv, ht;
2827 if (GET_CODE (op0) == CONST_DOUBLE)
2828 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2829 else
2830 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2832 if (GET_CODE (op1) == CONST_DOUBLE)
2833 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2834 else
2835 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2837 switch (code)
2839 case MINUS:
2840 /* A - B == A + (-B). */
2841 neg_double (l2, h2, &lv, &hv);
2842 l2 = lv, h2 = hv;
2844 /* Fall through.... */
2846 case PLUS:
2847 add_double (l1, h1, l2, h2, &lv, &hv);
2848 break;
2850 case MULT:
2851 mul_double (l1, h1, l2, h2, &lv, &hv);
2852 break;
2854 case DIV:
2855 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2856 &lv, &hv, &lt, &ht))
2857 return 0;
2858 break;
2860 case MOD:
2861 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2862 &lt, &ht, &lv, &hv))
2863 return 0;
2864 break;
2866 case UDIV:
2867 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2868 &lv, &hv, &lt, &ht))
2869 return 0;
2870 break;
2872 case UMOD:
2873 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2874 &lt, &ht, &lv, &hv))
2875 return 0;
2876 break;
2878 case AND:
2879 lv = l1 & l2, hv = h1 & h2;
2880 break;
2882 case IOR:
2883 lv = l1 | l2, hv = h1 | h2;
2884 break;
2886 case XOR:
2887 lv = l1 ^ l2, hv = h1 ^ h2;
2888 break;
2890 case SMIN:
2891 if (h1 < h2
2892 || (h1 == h2
2893 && ((unsigned HOST_WIDE_INT) l1
2894 < (unsigned HOST_WIDE_INT) l2)))
2895 lv = l1, hv = h1;
2896 else
2897 lv = l2, hv = h2;
2898 break;
2900 case SMAX:
2901 if (h1 > h2
2902 || (h1 == h2
2903 && ((unsigned HOST_WIDE_INT) l1
2904 > (unsigned HOST_WIDE_INT) l2)))
2905 lv = l1, hv = h1;
2906 else
2907 lv = l2, hv = h2;
2908 break;
2910 case UMIN:
2911 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2912 || (h1 == h2
2913 && ((unsigned HOST_WIDE_INT) l1
2914 < (unsigned HOST_WIDE_INT) l2)))
2915 lv = l1, hv = h1;
2916 else
2917 lv = l2, hv = h2;
2918 break;
2920 case UMAX:
2921 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2922 || (h1 == h2
2923 && ((unsigned HOST_WIDE_INT) l1
2924 > (unsigned HOST_WIDE_INT) l2)))
2925 lv = l1, hv = h1;
2926 else
2927 lv = l2, hv = h2;
2928 break;
2930 case LSHIFTRT: case ASHIFTRT:
2931 case ASHIFT:
2932 case ROTATE: case ROTATERT:
2933 if (SHIFT_COUNT_TRUNCATED)
2934 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2936 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2937 return 0;
2939 if (code == LSHIFTRT || code == ASHIFTRT)
2940 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2941 code == ASHIFTRT);
2942 else if (code == ASHIFT)
2943 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2944 else if (code == ROTATE)
2945 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2946 else /* code == ROTATERT */
2947 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2948 break;
2950 default:
2951 return 0;
2954 return immed_double_const (lv, hv, mode);
2957 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2958 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2960 /* Get the integer argument values in two forms:
2961 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
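      /* For example, with width == 8 and op0 == (const_int -1), ARG0
	 becomes 0xff while ARG0S is sign-extended back to -1.  */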
2963 arg0 = INTVAL (op0);
2964 arg1 = INTVAL (op1);
2966 if (width < HOST_BITS_PER_WIDE_INT)
2968 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2969 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2971 arg0s = arg0;
2972 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2973 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2975 arg1s = arg1;
2976 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2977 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2979 else
2981 arg0s = arg0;
2982 arg1s = arg1;
2985 /* Compute the value of the arithmetic. */
2987 switch (code)
2989 case PLUS:
2990 val = arg0s + arg1s;
2991 break;
2993 case MINUS:
2994 val = arg0s - arg1s;
2995 break;
2997 case MULT:
2998 val = arg0s * arg1s;
2999 break;
3001 case DIV:
3002 if (arg1s == 0
3003 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3004 && arg1s == -1))
3005 return 0;
3006 val = arg0s / arg1s;
3007 break;
3009 case MOD:
3010 if (arg1s == 0
3011 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3012 && arg1s == -1))
3013 return 0;
3014 val = arg0s % arg1s;
3015 break;
3017 case UDIV:
3018 if (arg1 == 0
3019 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3020 && arg1s == -1))
3021 return 0;
3022 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3023 break;
3025 case UMOD:
3026 if (arg1 == 0
3027 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3028 && arg1s == -1))
3029 return 0;
3030 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3031 break;
3033 case AND:
3034 val = arg0 & arg1;
3035 break;
3037 case IOR:
3038 val = arg0 | arg1;
3039 break;
3041 case XOR:
3042 val = arg0 ^ arg1;
3043 break;
3045 case LSHIFTRT:
3046 case ASHIFT:
3047 case ASHIFTRT:
3048 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3049 the value is in range. We can't return any old value for
3050 out-of-range arguments because either the middle-end (via
3051 shift_truncation_mask) or the back-end might be relying on
3052 target-specific knowledge. Nor can we rely on
3053 shift_truncation_mask, since the shift might not be part of an
3054 ashlM3, lshrM3 or ashrM3 instruction. */
3055 if (SHIFT_COUNT_TRUNCATED)
3056 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3057 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3058 return 0;
3060 val = (code == ASHIFT
3061 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3062 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3064 /* Sign-extend the result for arithmetic right shifts. */
3065 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3066 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3067 break;
3069 case ROTATERT:
3070 if (arg1 < 0)
3071 return 0;
3073 arg1 %= width;
3074 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3075 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3076 break;
3078 case ROTATE:
3079 if (arg1 < 0)
3080 return 0;
3082 arg1 %= width;
3083 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3084 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3085 break;
3087 case COMPARE:
3088 /* Do nothing here. */
3089 return 0;
3091 case SMIN:
3092 val = arg0s <= arg1s ? arg0s : arg1s;
3093 break;
3095 case UMIN:
3096 val = ((unsigned HOST_WIDE_INT) arg0
3097 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3098 break;
3100 case SMAX:
3101 val = arg0s > arg1s ? arg0s : arg1s;
3102 break;
3104 case UMAX:
3105 val = ((unsigned HOST_WIDE_INT) arg0
3106 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3107 break;
3109 case SS_PLUS:
3110 case US_PLUS:
3111 case SS_MINUS:
3112 case US_MINUS:
3113 /* ??? There are simplifications that can be done. */
3114 return 0;
3116 default:
3117 gcc_unreachable ();
3120 return gen_int_mode (val, mode);
3123 return NULL_RTX;
3128 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3129 PLUS or MINUS.
3131 Rather than test for specific cases, we do this by a brute-force method
3132 and do all possible simplifications until no more changes occur. Then
3133 we rebuild the operation. */
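   For example, (minus (plus A B) (neg C)) is first flattened into the
   operand list { +A, +B, +C }; the entries are then simplified pairwise
   and the PLUS/MINUS chain is rebuilt from the survivors.  */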
3135 struct simplify_plus_minus_op_data
3137 rtx op;
3138 short neg;
3139 short ix;
3142 static int
3143 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3145 const struct simplify_plus_minus_op_data *d1 = p1;
3146 const struct simplify_plus_minus_op_data *d2 = p2;
3147 int result;
3149 result = (commutative_operand_precedence (d2->op)
3150 - commutative_operand_precedence (d1->op));
3151 if (result)
3152 return result;
3153 return d1->ix - d2->ix;
3156 static rtx
3157 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3158 rtx op1)
3160 struct simplify_plus_minus_op_data ops[8];
3161 rtx result, tem;
3162 int n_ops = 2, input_ops = 2;
3163 int first, changed, canonicalized = 0;
3164 int i, j;
3166 memset (ops, 0, sizeof ops);
3168 /* Set up the two operands and then expand them until nothing has been
3169 changed. If we run out of room in our array, give up; this should
3170 almost never happen. */
3172 ops[0].op = op0;
3173 ops[0].neg = 0;
3174 ops[1].op = op1;
3175 ops[1].neg = (code == MINUS);
3179 changed = 0;
3181 for (i = 0; i < n_ops; i++)
3183 rtx this_op = ops[i].op;
3184 int this_neg = ops[i].neg;
3185 enum rtx_code this_code = GET_CODE (this_op);
3187 switch (this_code)
3189 case PLUS:
3190 case MINUS:
3191 if (n_ops == 7)
3192 return NULL_RTX;
3194 ops[n_ops].op = XEXP (this_op, 1);
3195 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3196 n_ops++;
3198 ops[i].op = XEXP (this_op, 0);
3199 input_ops++;
3200 changed = 1;
3201 canonicalized |= this_neg;
3202 break;
3204 case NEG:
3205 ops[i].op = XEXP (this_op, 0);
3206 ops[i].neg = ! this_neg;
3207 changed = 1;
3208 canonicalized = 1;
3209 break;
3211 case CONST:
3212 if (n_ops < 7
3213 && GET_CODE (XEXP (this_op, 0)) == PLUS
3214 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3215 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3217 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3218 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3219 ops[n_ops].neg = this_neg;
3220 n_ops++;
3221 changed = 1;
3222 canonicalized = 1;
3224 break;
3226 case NOT:
3227 /* ~a -> (-a - 1) */
3228 if (n_ops != 7)
3230 ops[n_ops].op = constm1_rtx;
3231 ops[n_ops++].neg = this_neg;
3232 ops[i].op = XEXP (this_op, 0);
3233 ops[i].neg = !this_neg;
3234 changed = 1;
3235 canonicalized = 1;
3237 break;
3239 case CONST_INT:
3240 if (this_neg)
3242 ops[i].op = neg_const_int (mode, this_op);
3243 ops[i].neg = 0;
3244 changed = 1;
3245 canonicalized = 1;
3247 break;
3249 default:
3250 break;
3254 while (changed);
3256 gcc_assert (n_ops >= 2);
3257 if (!canonicalized)
3259 int n_constants = 0;
3261 for (i = 0; i < n_ops; i++)
3262 if (GET_CODE (ops[i].op) == CONST_INT)
3263 n_constants++;
3265 if (n_constants <= 1)
3266 return NULL_RTX;
3269 /* If we only have two operands, we can avoid the loops. */
3270 if (n_ops == 2)
3272 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3273 rtx lhs, rhs;
3275 /* Get the two operands. Be careful with the order, especially for
3276 the cases where code == MINUS. */
3277 if (ops[0].neg && ops[1].neg)
3279 lhs = gen_rtx_NEG (mode, ops[0].op);
3280 rhs = ops[1].op;
3282 else if (ops[0].neg)
3284 lhs = ops[1].op;
3285 rhs = ops[0].op;
3287 else
3289 lhs = ops[0].op;
3290 rhs = ops[1].op;
3293 return simplify_const_binary_operation (code, mode, lhs, rhs);
3296 /* Now simplify each pair of operands until nothing changes. The first
3297 time through just simplify constants against each other. */
3299 first = 1;
3302 changed = first;
3304 for (i = 0; i < n_ops - 1; i++)
3305 for (j = i + 1; j < n_ops; j++)
3307 rtx lhs = ops[i].op, rhs = ops[j].op;
3308 int lneg = ops[i].neg, rneg = ops[j].neg;
3310 if (lhs != 0 && rhs != 0
3311 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3313 enum rtx_code ncode = PLUS;
3315 if (lneg != rneg)
3317 ncode = MINUS;
3318 if (lneg)
3319 tem = lhs, lhs = rhs, rhs = tem;
3321 else if (swap_commutative_operands_p (lhs, rhs))
3322 tem = lhs, lhs = rhs, rhs = tem;
3324 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3325 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3327 rtx tem_lhs, tem_rhs;
3329 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3330 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3331 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3333 if (tem && !CONSTANT_P (tem))
3334 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3336 else
3337 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3339 /* Reject "simplifications" that just wrap the two
3340 arguments in a CONST. Failure to do so can result
3341 in infinite recursion with simplify_binary_operation
3342 when it calls us to simplify CONST operations. */
3343 if (tem
3344 && ! (GET_CODE (tem) == CONST
3345 && GET_CODE (XEXP (tem, 0)) == ncode
3346 && XEXP (XEXP (tem, 0), 0) == lhs
3347 && XEXP (XEXP (tem, 0), 1) == rhs)
3348 /* Don't allow -x + -1 -> ~x simplifications in the
3349 first pass. This allows us the chance to combine
3350 the -1 with other constants. */
3351 && ! (first
3352 && GET_CODE (tem) == NOT
3353 && XEXP (tem, 0) == rhs))
3355 lneg &= rneg;
3356 if (GET_CODE (tem) == NEG)
3357 tem = XEXP (tem, 0), lneg = !lneg;
3358 if (GET_CODE (tem) == CONST_INT && lneg)
3359 tem = neg_const_int (mode, tem), lneg = 0;
3361 ops[i].op = tem;
3362 ops[i].neg = lneg;
3363 ops[j].op = NULL_RTX;
3364 changed = 1;
3369 first = 0;
3371 while (changed);
3373 /* Pack all the operands to the lower-numbered entries. */
3374 for (i = 0, j = 0; j < n_ops; j++)
3375 if (ops[j].op)
3377 ops[i] = ops[j];
3378 /* Stabilize sort. */
3379 ops[i].ix = i;
3380 i++;
3382 n_ops = i;
3384 /* Sort the operations based on swap_commutative_operands_p. */
3385 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3387 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3388 if (n_ops == 2
3389 && GET_CODE (ops[1].op) == CONST_INT
3390 && CONSTANT_P (ops[0].op)
3391 && ops[0].neg)
3392 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3394 /* We suppressed creation of trivial CONST expressions in the
3395 combination loop to avoid recursion. Create one manually now.
3396 The combination loop should have ensured that there is exactly
3397 one CONST_INT, and the sort will have ensured that it is last
3398 in the array and that any other constant will be next-to-last. */
3400 if (n_ops > 1
3401 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3402 && CONSTANT_P (ops[n_ops - 2].op))
3404 rtx value = ops[n_ops - 1].op;
3405 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3406 value = neg_const_int (mode, value);
3407 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3408 n_ops--;
3411 /* Put a non-negated operand first, if possible. */
3413 for (i = 0; i < n_ops && ops[i].neg; i++)
3414 continue;
3415 if (i == n_ops)
3416 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3417 else if (i != 0)
3419 tem = ops[0].op;
3420 ops[0] = ops[i];
3421 ops[i].op = tem;
3422 ops[i].neg = 1;
3425 /* Now make the result by performing the requested operations. */
3426 result = ops[0].op;
3427 for (i = 1; i < n_ops; i++)
3428 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3429 mode, result, ops[i].op);
3431 return result;
3434 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3435 static bool
3436 plus_minus_operand_p (rtx x)
3438 return GET_CODE (x) == PLUS
3439 || GET_CODE (x) == MINUS
3440 || (GET_CODE (x) == CONST
3441 && GET_CODE (XEXP (x, 0)) == PLUS
3442 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3443 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3446 /* Like simplify_binary_operation except used for relational operators.
3447 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3448 not also be VOIDmode.
3450 CMP_MODE specifies the mode in which the comparison is done, so it is
3451 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3452 the operands or, if both are VOIDmode, the operands are compared in
3453 "infinite precision". */
3454 rtx
3455 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3456 enum machine_mode cmp_mode, rtx op0, rtx op1)
3458 rtx tem, trueop0, trueop1;
3460 if (cmp_mode == VOIDmode)
3461 cmp_mode = GET_MODE (op0);
3462 if (cmp_mode == VOIDmode)
3463 cmp_mode = GET_MODE (op1);
3465 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3466 if (tem)
3468 if (SCALAR_FLOAT_MODE_P (mode))
3470 if (tem == const0_rtx)
3471 return CONST0_RTX (mode);
3472 #ifdef FLOAT_STORE_FLAG_VALUE
3474 REAL_VALUE_TYPE val;
3475 val = FLOAT_STORE_FLAG_VALUE (mode);
3476 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3478 #else
3479 return NULL_RTX;
3480 #endif
3482 if (VECTOR_MODE_P (mode))
3484 if (tem == const0_rtx)
3485 return CONST0_RTX (mode);
3486 #ifdef VECTOR_STORE_FLAG_VALUE
3488 int i, units;
3489 rtvec v;
3491 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3492 if (val == NULL_RTX)
3493 return NULL_RTX;
3494 if (val == const1_rtx)
3495 return CONST1_RTX (mode);
3497 units = GET_MODE_NUNITS (mode);
3498 v = rtvec_alloc (units);
3499 for (i = 0; i < units; i++)
3500 RTVEC_ELT (v, i) = val;
3501 return gen_rtx_raw_CONST_VECTOR (mode, v);
3503 #else
3504 return NULL_RTX;
3505 #endif
3508 return tem;
3511 /* For the following tests, ensure const0_rtx is op1. */
3512 if (swap_commutative_operands_p (op0, op1)
3513 || (op0 == const0_rtx && op1 != const0_rtx))
3514 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3516 /* If op0 is a compare, extract the comparison arguments from it. */
3517 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3518 return simplify_relational_operation (code, mode, VOIDmode,
3519 XEXP (op0, 0), XEXP (op0, 1));
3521 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3522 || CC0_P (op0))
3523 return NULL_RTX;
3525 trueop0 = avoid_constant_pool_reference (op0);
3526 trueop1 = avoid_constant_pool_reference (op1);
3527 return simplify_relational_operation_1 (code, mode, cmp_mode,
3528 trueop0, trueop1);
3531 /* This part of simplify_relational_operation is only used when CMP_MODE
3532 is not in class MODE_CC (i.e. it is a real comparison).
3534 MODE is the mode of the result, while CMP_MODE specifies the mode
3535 in which the comparison is done, so it is the mode of the operands. */
3537 static rtx
3538 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3539 enum machine_mode cmp_mode, rtx op0, rtx op1)
3541 enum rtx_code op0code = GET_CODE (op0);
3543 if (GET_CODE (op1) == CONST_INT)
3545 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3547 /* If op0 is a comparison, extract the comparison arguments
3548 from it. */
3549 if (code == NE)
3551 if (GET_MODE (op0) == mode)
3552 return simplify_rtx (op0);
3553 else
3554 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3555 XEXP (op0, 0), XEXP (op0, 1));
3557 else if (code == EQ)
3559 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3560 if (new_code != UNKNOWN)
3561 return simplify_gen_relational (new_code, mode, VOIDmode,
3562 XEXP (op0, 0), XEXP (op0, 1));
3567 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3568 if ((code == EQ || code == NE)
3569 && (op0code == PLUS || op0code == MINUS)
3570 && CONSTANT_P (op1)
3571 && CONSTANT_P (XEXP (op0, 1))
3572 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3574 rtx x = XEXP (op0, 0);
3575 rtx c = XEXP (op0, 1);
3577 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3578 cmp_mode, op1, c);
3579 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3582 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3583 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3584 if (code == NE
3585 && op1 == const0_rtx
3586 && GET_MODE_CLASS (mode) == MODE_INT
3587 && cmp_mode != VOIDmode
3588 /* ??? Work-around BImode bugs in the ia64 backend. */
3589 && mode != BImode
3590 && cmp_mode != BImode
3591 && nonzero_bits (op0, cmp_mode) == 1
3592 && STORE_FLAG_VALUE == 1)
3593 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3594 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3595 : lowpart_subreg (mode, op0, cmp_mode);
3597 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3598 if ((code == EQ || code == NE)
3599 && op1 == const0_rtx
3600 && op0code == XOR)
3601 return simplify_gen_relational (code, mode, cmp_mode,
3602 XEXP (op0, 0), XEXP (op0, 1));
3604 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3605 if ((code == EQ || code == NE)
3606 && op0code == XOR
3607 && rtx_equal_p (XEXP (op0, 0), op1)
3608 && !side_effects_p (XEXP (op0, 1)))
3609 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3610 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3611 if ((code == EQ || code == NE)
3612 && op0code == XOR
3613 && rtx_equal_p (XEXP (op0, 1), op1)
3614 && !side_effects_p (XEXP (op0, 0)))
3615 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3617 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3618 if ((code == EQ || code == NE)
3619 && op0code == XOR
3620 && (GET_CODE (op1) == CONST_INT
3621 || GET_CODE (op1) == CONST_DOUBLE)
3622 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3623 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3624 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3625 simplify_gen_binary (XOR, cmp_mode,
3626 XEXP (op0, 1), op1));
3628 return NULL_RTX;
3631 /* Check if the given comparison (done in the given MODE) is actually a
3632 tautology or a contradiction.
3633 If no simplification is possible, this function returns zero.
3634 Otherwise, it returns either const_true_rtx or const0_rtx. */
3636 rtx
3637 simplify_const_relational_operation (enum rtx_code code,
3638 enum machine_mode mode,
3639 rtx op0, rtx op1)
3641 int equal, op0lt, op0ltu, op1lt, op1ltu;
3642 rtx tem;
3643 rtx trueop0;
3644 rtx trueop1;
3646 gcc_assert (mode != VOIDmode
3647 || (GET_MODE (op0) == VOIDmode
3648 && GET_MODE (op1) == VOIDmode));
3650 /* If op0 is a compare, extract the comparison arguments from it. */
3651 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3653 op1 = XEXP (op0, 1);
3654 op0 = XEXP (op0, 0);
3656 if (GET_MODE (op0) != VOIDmode)
3657 mode = GET_MODE (op0);
3658 else if (GET_MODE (op1) != VOIDmode)
3659 mode = GET_MODE (op1);
3660 else
3661 return 0;
3664 /* We can't simplify MODE_CC values since we don't know what the
3665 actual comparison is. */
3666 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3667 return 0;
3669 /* Make sure the constant is second. */
3670 if (swap_commutative_operands_p (op0, op1))
3672 tem = op0, op0 = op1, op1 = tem;
3673 code = swap_condition (code);
3676 trueop0 = avoid_constant_pool_reference (op0);
3677 trueop1 = avoid_constant_pool_reference (op1);
3679 /* For integer comparisons of A and B maybe we can simplify A - B and can
3680 then simplify a comparison of that with zero. If A and B are both either
3681 a register or a CONST_INT, this can't help; testing for these cases will
3682 prevent infinite recursion here and speed things up.
3684 If CODE is an unsigned comparison, then we can never do this optimization,
3685 because it gives an incorrect result if the subtraction wraps around zero.
3686 ANSI C defines unsigned operations such that they never overflow, and
3687 thus such cases can not be ignored; but we cannot do it even for
3688 signed comparisons for languages such as Java, so test flag_wrapv. */
3690 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3691 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3692 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3693 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3694 /* We cannot do this for == or != if tem is a nonzero address. */
3695 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3696 && code != GTU && code != GEU && code != LTU && code != LEU)
3697 return simplify_const_relational_operation (signed_condition (code),
3698 mode, tem, const0_rtx);
3700 if (flag_unsafe_math_optimizations && code == ORDERED)
3701 return const_true_rtx;
3703 if (flag_unsafe_math_optimizations && code == UNORDERED)
3704 return const0_rtx;
3706 /* For modes without NaNs, if the two operands are equal, we know the
3707 result except if they have side-effects. */
3708 if (! HONOR_NANS (GET_MODE (trueop0))
3709 && rtx_equal_p (trueop0, trueop1)
3710 && ! side_effects_p (trueop0))
3711 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3713 /* If the operands are floating-point constants, see if we can fold
3714 the result. */
3715 else if (GET_CODE (trueop0) == CONST_DOUBLE
3716 && GET_CODE (trueop1) == CONST_DOUBLE
3717 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3719 REAL_VALUE_TYPE d0, d1;
3721 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3722 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3724 /* Comparisons are unordered iff at least one of the values is NaN. */
3725 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3726 switch (code)
3728 case UNEQ:
3729 case UNLT:
3730 case UNGT:
3731 case UNLE:
3732 case UNGE:
3733 case NE:
3734 case UNORDERED:
3735 return const_true_rtx;
3736 case EQ:
3737 case LT:
3738 case GT:
3739 case LE:
3740 case GE:
3741 case LTGT:
3742 case ORDERED:
3743 return const0_rtx;
3744 default:
3745 return 0;
3748 equal = REAL_VALUES_EQUAL (d0, d1);
3749 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3750 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3753 /* Otherwise, see if the operands are both integers. */
3754 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3755 && (GET_CODE (trueop0) == CONST_DOUBLE
3756 || GET_CODE (trueop0) == CONST_INT)
3757 && (GET_CODE (trueop1) == CONST_DOUBLE
3758 || GET_CODE (trueop1) == CONST_INT))
3760 int width = GET_MODE_BITSIZE (mode);
3761 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3762 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3764 /* Get the two words comprising each integer constant. */
3765 if (GET_CODE (trueop0) == CONST_DOUBLE)
3767 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3768 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3770 else
3772 l0u = l0s = INTVAL (trueop0);
3773 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3776 if (GET_CODE (trueop1) == CONST_DOUBLE)
3778 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3779 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3781 else
3783 l1u = l1s = INTVAL (trueop1);
3784 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3787 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3788 we have to sign or zero-extend the values. */
3789 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3791 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3792 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3794 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3795 l0s |= ((HOST_WIDE_INT) (-1) << width);
3797 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3798 l1s |= ((HOST_WIDE_INT) (-1) << width);
3800 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3801 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3803 equal = (h0u == h1u && l0u == l1u);
3804 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3805 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3806 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3807 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3810 /* Otherwise, there are some code-specific tests we can make. */
3811 else
3813 /* Optimize comparisons with upper and lower bounds. */
3814 if (SCALAR_INT_MODE_P (mode)
3815 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3817 rtx mmin, mmax;
3818 int sign;
3820 if (code == GEU
3821 || code == LEU
3822 || code == GTU
3823 || code == LTU)
3824 sign = 0;
3825 else
3826 sign = 1;
3828 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3830 tem = NULL_RTX;
3831 switch (code)
3833 case GEU:
3834 case GE:
3835 /* x >= min is always true. */
3836 if (rtx_equal_p (trueop1, mmin))
3837 tem = const_true_rtx;
3838 else
3839 break;
3841 case LEU:
3842 case LE:
3843 /* x <= max is always true. */
3844 if (rtx_equal_p (trueop1, mmax))
3845 tem = const_true_rtx;
3846 break;
3848 case GTU:
3849 case GT:
3850 /* x > max is always false. */
3851 if (rtx_equal_p (trueop1, mmax))
3852 tem = const0_rtx;
3853 break;
3855 case LTU:
3856 case LT:
3857 /* x < min is always false. */
3858 if (rtx_equal_p (trueop1, mmin))
3859 tem = const0_rtx;
3860 break;
3862 default:
3863 break;
3865 if (tem == const0_rtx
3866 || tem == const_true_rtx)
3867 return tem;
3870 switch (code)
3872 case EQ:
3873 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3874 return const0_rtx;
3875 break;
3877 case NE:
3878 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3879 return const_true_rtx;
3880 break;
3882 case LT:
3883 /* Optimize abs(x) < 0.0. */
3884 if (trueop1 == CONST0_RTX (mode)
3885 && !HONOR_SNANS (mode)
3886 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3888 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3889 : trueop0;
3890 if (GET_CODE (tem) == ABS)
3891 return const0_rtx;
3893 break;
3895 case GE:
3896 /* Optimize abs(x) >= 0.0. */
3897 if (trueop1 == CONST0_RTX (mode)
3898 && !HONOR_NANS (mode)
3899 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3901 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3902 : trueop0;
3903 if (GET_CODE (tem) == ABS)
3904 return const_true_rtx;
3906 break;
3908 case UNGE:
3909 /* Optimize ! (abs(x) < 0.0). */
3910 if (trueop1 == CONST0_RTX (mode))
3912 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3913 : trueop0;
3914 if (GET_CODE (tem) == ABS)
3915 return const_true_rtx;
3917 break;
3919 default:
3920 break;
3923 return 0;
3926 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3927 as appropriate. */
3928 switch (code)
3930 case EQ:
3931 case UNEQ:
3932 return equal ? const_true_rtx : const0_rtx;
3933 case NE:
3934 case LTGT:
3935 return ! equal ? const_true_rtx : const0_rtx;
3936 case LT:
3937 case UNLT:
3938 return op0lt ? const_true_rtx : const0_rtx;
3939 case GT:
3940 case UNGT:
3941 return op1lt ? const_true_rtx : const0_rtx;
3942 case LTU:
3943 return op0ltu ? const_true_rtx : const0_rtx;
3944 case GTU:
3945 return op1ltu ? const_true_rtx : const0_rtx;
3946 case LE:
3947 case UNLE:
3948 return equal || op0lt ? const_true_rtx : const0_rtx;
3949 case GE:
3950 case UNGE:
3951 return equal || op1lt ? const_true_rtx : const0_rtx;
3952 case LEU:
3953 return equal || op0ltu ? const_true_rtx : const0_rtx;
3954 case GEU:
3955 return equal || op1ltu ? const_true_rtx : const0_rtx;
3956 case ORDERED:
3957 return const_true_rtx;
3958 case UNORDERED:
3959 return const0_rtx;
3960 default:
3961 gcc_unreachable ();
3965 /* Simplify CODE, an operation with result mode MODE and three operands,
3966 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3967 a constant. Return 0 if no simplification is possible. */
3969 rtx
3970 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3971 enum machine_mode op0_mode, rtx op0, rtx op1,
3972 rtx op2)
3974 unsigned int width = GET_MODE_BITSIZE (mode);
3976 /* VOIDmode means "infinite" precision. */
3977 if (width == 0)
3978 width = HOST_BITS_PER_WIDE_INT;
3980 switch (code)
3982 case SIGN_EXTRACT:
3983 case ZERO_EXTRACT:
3984 if (GET_CODE (op0) == CONST_INT
3985 && GET_CODE (op1) == CONST_INT
3986 && GET_CODE (op2) == CONST_INT
3987 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3988 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3990 /* Extracting a bit-field from a constant */
3991 HOST_WIDE_INT val = INTVAL (op0);
3993 if (BITS_BIG_ENDIAN)
3994 val >>= (GET_MODE_BITSIZE (op0_mode)
3995 - INTVAL (op2) - INTVAL (op1));
3996 else
3997 val >>= INTVAL (op2);
3999 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4001 /* First zero-extend. */
4002 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4003 /* If desired, propagate sign bit. */
4004 if (code == SIGN_EXTRACT
4005 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4006 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4009 /* Clear the bits that don't belong in our mode,
4010 unless they and our sign bit are all one.
4011 So we get either a reasonable negative value or a reasonable
4012 unsigned value for this mode. */
4013 if (width < HOST_BITS_PER_WIDE_INT
4014 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4015 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4016 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4018 return gen_int_mode (val, mode);
4020 break;
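/* Illustrative examples of the constant extraction above (editorial,
   assuming BITS_BIG_ENDIAN == 0, so op2 counts from the low end):

	(zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
	  -> (const_int 3)
	(sign_extract:SI (const_int 0x1294) (const_int 4) (const_int 4))
	  -> (const_int -7)

   i.e. bits [7:4] are 0x3 and 0x9 respectively, and SIGN_EXTRACT then
   propagates bit 3 as the sign.  */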
4022 case IF_THEN_ELSE:
4023 if (GET_CODE (op0) == CONST_INT)
4024 return op0 != const0_rtx ? op1 : op2;
4026 /* Convert c ? a : a into "a". */
4027 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4028 return op1;
4030 /* Convert a != b ? a : b into "a". */
4031 if (GET_CODE (op0) == NE
4032 && ! side_effects_p (op0)
4033 && ! HONOR_NANS (mode)
4034 && ! HONOR_SIGNED_ZEROS (mode)
4035 && ((rtx_equal_p (XEXP (op0, 0), op1)
4036 && rtx_equal_p (XEXP (op0, 1), op2))
4037 || (rtx_equal_p (XEXP (op0, 0), op2)
4038 && rtx_equal_p (XEXP (op0, 1), op1))))
4039 return op1;
4041 /* Convert a == b ? a : b into "b". */
4042 if (GET_CODE (op0) == EQ
4043 && ! side_effects_p (op0)
4044 && ! HONOR_NANS (mode)
4045 && ! HONOR_SIGNED_ZEROS (mode)
4046 && ((rtx_equal_p (XEXP (op0, 0), op1)
4047 && rtx_equal_p (XEXP (op0, 1), op2))
4048 || (rtx_equal_p (XEXP (op0, 0), op2)
4049 && rtx_equal_p (XEXP (op0, 1), op1))))
4050 return op2;
4052 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4054 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4055 ? GET_MODE (XEXP (op0, 1))
4056 : GET_MODE (XEXP (op0, 0)));
4057 rtx temp;
4059 /* Look for happy constants in op1 and op2. */
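/* For example (an editorial illustration, assuming an integer comparison
   and STORE_FLAG_VALUE == 1):

	(if_then_else (lt:SI (reg A) (reg B)) (const_int 1) (const_int 0))

   becomes (lt:SI (reg A) (reg B)) via the code below, and with the two
   constants swapped the comparison is first reversed with
   reversed_comparison_code, giving (ge:SI (reg A) (reg B)).  */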
4060 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4062 HOST_WIDE_INT t = INTVAL (op1);
4063 HOST_WIDE_INT f = INTVAL (op2);
4065 if (t == STORE_FLAG_VALUE && f == 0)
4066 code = GET_CODE (op0);
4067 else if (t == 0 && f == STORE_FLAG_VALUE)
4069 enum rtx_code tmp;
4070 tmp = reversed_comparison_code (op0, NULL_RTX);
4071 if (tmp == UNKNOWN)
4072 break;
4073 code = tmp;
4075 else
4076 break;
4078 return simplify_gen_relational (code, mode, cmp_mode,
4079 XEXP (op0, 0), XEXP (op0, 1));
4082 if (cmp_mode == VOIDmode)
4083 cmp_mode = op0_mode;
4084 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4085 cmp_mode, XEXP (op0, 0),
4086 XEXP (op0, 1));
4088 /* See if any simplifications were possible. */
4089 if (temp)
4091 if (GET_CODE (temp) == CONST_INT)
4092 return temp == const0_rtx ? op2 : op1;
4093 else if (temp)
4094 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4097 break;
4099 case VEC_MERGE:
4100 gcc_assert (GET_MODE (op0) == mode);
4101 gcc_assert (GET_MODE (op1) == mode);
4102 gcc_assert (VECTOR_MODE_P (mode));
4103 op2 = avoid_constant_pool_reference (op2);
4104 if (GET_CODE (op2) == CONST_INT)
4106 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4107 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4108 int mask = (1 << n_elts) - 1;
4110 if (!(INTVAL (op2) & mask))
4111 return op1;
4112 if ((INTVAL (op2) & mask) == mask)
4113 return op0;
4115 op0 = avoid_constant_pool_reference (op0);
4116 op1 = avoid_constant_pool_reference (op1);
4117 if (GET_CODE (op0) == CONST_VECTOR
4118 && GET_CODE (op1) == CONST_VECTOR)
4120 rtvec v = rtvec_alloc (n_elts);
4121 unsigned int i;
4123 for (i = 0; i < n_elts; i++)
4124 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4125 ? CONST_VECTOR_ELT (op0, i)
4126 : CONST_VECTOR_ELT (op1, i));
4127 return gen_rtx_CONST_VECTOR (mode, v);
4130 break;
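/* An editorial illustration of the CONST_VECTOR case above: bit I of
   op2 selects element I from op0 when set and from op1 when clear, so

	(vec_merge:V4SI (const_vector [A B C D])
			(const_vector [E F G H])
			(const_int 5))

   should yield (const_vector [A F C H]), since 5 = 0b0101.  */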
4132 default:
4133 gcc_unreachable ();
4136 return 0;
4139 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4140 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4142 Works by unpacking OP into a collection of 8-bit values
4143 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4144 and then repacking them again for OUTERMODE. */
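/* An illustrative example (editorial, assuming a little-endian target
   where byte 0 of an HImode value is its low-order byte):

	(subreg:QI (const_int 0x1234) 0) with INNERMODE == HImode
	  -> (const_int 0x34)
	(subreg:QI (const_int 0x1234) 1) with INNERMODE == HImode
	  -> (const_int 0x12)

   On a fully big-endian target the two results are swapped.  */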
4146 static rtx
4147 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4148 enum machine_mode innermode, unsigned int byte)
4150 /* We support up to 512-bit values (for V8DFmode). */
4151 enum {
4152 max_bitsize = 512,
4153 value_bit = 8,
4154 value_mask = (1 << value_bit) - 1
4156 unsigned char value[max_bitsize / value_bit];
4157 int value_start;
4158 int i;
4159 int elem;
4161 int num_elem;
4162 rtx * elems;
4163 int elem_bitsize;
4164 rtx result_s;
4165 rtvec result_v = NULL;
4166 enum mode_class outer_class;
4167 enum machine_mode outer_submode;
4169 /* Some ports misuse CCmode. */
4170 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4171 return op;
4173 /* We have no way to represent a complex constant at the rtl level. */
4174 if (COMPLEX_MODE_P (outermode))
4175 return NULL_RTX;
4177 /* Unpack the value. */
4179 if (GET_CODE (op) == CONST_VECTOR)
4181 num_elem = CONST_VECTOR_NUNITS (op);
4182 elems = &CONST_VECTOR_ELT (op, 0);
4183 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4185 else
4187 num_elem = 1;
4188 elems = &op;
4189 elem_bitsize = max_bitsize;
4191 /* If this asserts, it is too complicated; reducing value_bit may help. */
4192 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4193 /* I don't know how to handle endianness of sub-units. */
4194 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4196 for (elem = 0; elem < num_elem; elem++)
4198 unsigned char * vp;
4199 rtx el = elems[elem];
4201 /* Vectors are kept in target memory order. (This is probably
4202 a mistake.) */
4204 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4205 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4206 / BITS_PER_UNIT);
4207 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4208 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4209 unsigned bytele = (subword_byte % UNITS_PER_WORD
4210 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4211 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4214 switch (GET_CODE (el))
4216 case CONST_INT:
4217 for (i = 0;
4218 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4219 i += value_bit)
4220 *vp++ = INTVAL (el) >> i;
4221 /* CONST_INTs are always logically sign-extended. */
4222 for (; i < elem_bitsize; i += value_bit)
4223 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4224 break;
4226 case CONST_DOUBLE:
4227 if (GET_MODE (el) == VOIDmode)
4229 /* If this triggers, someone should have generated a
4230 CONST_INT instead. */
4231 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4233 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4234 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4235 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4237 *vp++
4238 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4239 i += value_bit;
4241 /* It shouldn't matter what's done here, so fill it with
4242 zero. */
4243 for (; i < elem_bitsize; i += value_bit)
4244 *vp++ = 0;
4246 else
4248 long tmp[max_bitsize / 32];
4249 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4251 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4252 gcc_assert (bitsize <= elem_bitsize);
4253 gcc_assert (bitsize % value_bit == 0);
4255 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4256 GET_MODE (el));
4258 /* real_to_target produces its result in words affected by
4259 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4260 and use WORDS_BIG_ENDIAN instead; see the documentation
4261 of SUBREG in rtl.texi. */
4262 for (i = 0; i < bitsize; i += value_bit)
4264 int ibase;
4265 if (WORDS_BIG_ENDIAN)
4266 ibase = bitsize - 1 - i;
4267 else
4268 ibase = i;
4269 *vp++ = tmp[ibase / 32] >> i % 32;
4272 /* It shouldn't matter what's done here, so fill it with
4273 zero. */
4274 for (; i < elem_bitsize; i += value_bit)
4275 *vp++ = 0;
4277 break;
4279 default:
4280 gcc_unreachable ();
4284 /* Now, pick the right byte to start with. */
4285 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4286 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4287 will already have offset 0. */
4288 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4290 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4291 - byte);
4292 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4293 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4294 byte = (subword_byte % UNITS_PER_WORD
4295 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4298 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4299 so if it's become negative it will instead be very large.) */
4300 gcc_assert (byte < GET_MODE_SIZE (innermode));
4302 /* Convert from bytes to chunks of size value_bit. */
4303 value_start = byte * (BITS_PER_UNIT / value_bit);
4305 /* Re-pack the value. */
4307 if (VECTOR_MODE_P (outermode))
4309 num_elem = GET_MODE_NUNITS (outermode);
4310 result_v = rtvec_alloc (num_elem);
4311 elems = &RTVEC_ELT (result_v, 0);
4312 outer_submode = GET_MODE_INNER (outermode);
4314 else
4316 num_elem = 1;
4317 elems = &result_s;
4318 outer_submode = outermode;
4321 outer_class = GET_MODE_CLASS (outer_submode);
4322 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4324 gcc_assert (elem_bitsize % value_bit == 0);
4325 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4327 for (elem = 0; elem < num_elem; elem++)
4329 unsigned char *vp;
4331 /* Vectors are stored in target memory order. (This is probably
4332 a mistake.) */
4334 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4335 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4336 / BITS_PER_UNIT);
4337 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4338 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4339 unsigned bytele = (subword_byte % UNITS_PER_WORD
4340 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4341 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4344 switch (outer_class)
4346 case MODE_INT:
4347 case MODE_PARTIAL_INT:
4349 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4351 for (i = 0;
4352 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4353 i += value_bit)
4354 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4355 for (; i < elem_bitsize; i += value_bit)
4356 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4357 << (i - HOST_BITS_PER_WIDE_INT));
4359 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4360 know why. */
4361 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4362 elems[elem] = gen_int_mode (lo, outer_submode);
4363 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4364 elems[elem] = immed_double_const (lo, hi, outer_submode);
4365 else
4366 return NULL_RTX;
4368 break;
4370 case MODE_FLOAT:
4371 case MODE_DECIMAL_FLOAT:
4373 REAL_VALUE_TYPE r;
4374 long tmp[max_bitsize / 32];
4376 /* real_from_target wants its input in words affected by
4377 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4378 and use WORDS_BIG_ENDIAN instead; see the documentation
4379 of SUBREG in rtl.texi. */
4380 for (i = 0; i < max_bitsize / 32; i++)
4381 tmp[i] = 0;
4382 for (i = 0; i < elem_bitsize; i += value_bit)
4384 int ibase;
4385 if (WORDS_BIG_ENDIAN)
4386 ibase = elem_bitsize - 1 - i;
4387 else
4388 ibase = i;
4389 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4392 real_from_target (&r, tmp, outer_submode);
4393 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4395 break;
4397 default:
4398 gcc_unreachable ();
4401 if (VECTOR_MODE_P (outermode))
4402 return gen_rtx_CONST_VECTOR (outermode, result_v);
4403 else
4404 return result_s;
4407 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4408 Return 0 if no simplifications are possible. */
4409 rtx
4410 simplify_subreg (enum machine_mode outermode, rtx op,
4411 enum machine_mode innermode, unsigned int byte)
4413 /* Little bit of sanity checking. */
4414 gcc_assert (innermode != VOIDmode);
4415 gcc_assert (outermode != VOIDmode);
4416 gcc_assert (innermode != BLKmode);
4417 gcc_assert (outermode != BLKmode);
4419 gcc_assert (GET_MODE (op) == innermode
4420 || GET_MODE (op) == VOIDmode);
4422 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4423 gcc_assert (byte < GET_MODE_SIZE (innermode));
4425 if (outermode == innermode && !byte)
4426 return op;
4428 if (GET_CODE (op) == CONST_INT
4429 || GET_CODE (op) == CONST_DOUBLE
4430 || GET_CODE (op) == CONST_VECTOR)
4431 return simplify_immed_subreg (outermode, op, innermode, byte);
4433 /* Changing mode twice with SUBREG => just change it once,
4434 or not at all if changing back to the starting mode. */
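/* For example (an editorial illustration), the code below turns

	(subreg:QI (subreg:HI (reg:SI R) 0) 0) into (subreg:QI (reg:SI R) 0)

   and, when the outer mode matches the innermost mode with both offsets
   zero, (subreg:SI (subreg:HI (reg:SI R) 0) 0) back into (reg:SI R).  */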
4435 if (GET_CODE (op) == SUBREG)
4437 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4438 int final_offset = byte + SUBREG_BYTE (op);
4439 rtx newx;
4441 if (outermode == innermostmode
4442 && byte == 0 && SUBREG_BYTE (op) == 0)
4443 return SUBREG_REG (op);
4445 /* The SUBREG_BYTE represents the offset, as if the value were stored
4446 in memory. An irritating exception is a paradoxical subreg, where
4447 we define SUBREG_BYTE to be 0; on big-endian machines, this
4448 value should be negative. For a moment, undo this exception. */
4449 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4451 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4452 if (WORDS_BIG_ENDIAN)
4453 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4454 if (BYTES_BIG_ENDIAN)
4455 final_offset += difference % UNITS_PER_WORD;
4457 if (SUBREG_BYTE (op) == 0
4458 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4460 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4461 if (WORDS_BIG_ENDIAN)
4462 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4463 if (BYTES_BIG_ENDIAN)
4464 final_offset += difference % UNITS_PER_WORD;
4467 /* See whether resulting subreg will be paradoxical. */
4468 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4470 /* In nonparadoxical subregs we can't handle negative offsets. */
4471 if (final_offset < 0)
4472 return NULL_RTX;
4473 /* Bail out in case resulting subreg would be incorrect. */
4474 if (final_offset % GET_MODE_SIZE (outermode)
4475 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4476 return NULL_RTX;
4478 else
4480 int offset = 0;
4481 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4483 /* In a paradoxical subreg, see if we are still looking at the lower part.
4484 If so, our SUBREG_BYTE will be 0. */
4485 if (WORDS_BIG_ENDIAN)
4486 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4487 if (BYTES_BIG_ENDIAN)
4488 offset += difference % UNITS_PER_WORD;
4489 if (offset == final_offset)
4490 final_offset = 0;
4491 else
4492 return NULL_RTX;
4495 /* Recurse for further possible simplifications. */
4496 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4497 final_offset);
4498 if (newx)
4499 return newx;
4500 if (validate_subreg (outermode, innermostmode,
4501 SUBREG_REG (op), final_offset))
4502 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4503 return NULL_RTX;
4506 /* Merge implicit and explicit truncations. */
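/* For example (an editorial illustration, assuming BYTE selects the
   lowpart): (subreg:QI (truncate:HI (reg:SI R)) 0) on a little-endian
   target should become (truncate:QI (reg:SI R)).  */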
4508 if (GET_CODE (op) == TRUNCATE
4509 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4510 && subreg_lowpart_offset (outermode, innermode) == byte)
4511 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4512 GET_MODE (XEXP (op, 0)));
4514 /* SUBREG of a hard register => just change the register number
4515 and/or mode. If the hard register is not valid in that mode,
4516 suppress this simplification. If the hard register is the stack,
4517 frame, or argument pointer, leave this as a SUBREG. */
4519 if (REG_P (op)
4520 && REGNO (op) < FIRST_PSEUDO_REGISTER
4521 #ifdef CANNOT_CHANGE_MODE_CLASS
4522 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4523 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4524 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4525 #endif
4526 && ((reload_completed && !frame_pointer_needed)
4527 || (REGNO (op) != FRAME_POINTER_REGNUM
4528 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4529 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4530 #endif
4532 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4533 && REGNO (op) != ARG_POINTER_REGNUM
4534 #endif
4535 && REGNO (op) != STACK_POINTER_REGNUM
4536 && subreg_offset_representable_p (REGNO (op), innermode,
4537 byte, outermode))
4539 unsigned int regno = REGNO (op);
4540 unsigned int final_regno
4541 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4543 /* ??? We do allow it if the current REG is not valid for
4544 its mode. This is a kludge to work around how float/complex
4545 arguments are passed on 32-bit SPARC and should be fixed. */
4546 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4547 || ! HARD_REGNO_MODE_OK (regno, innermode))
4549 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4551 /* Propagate the original regno. We don't have any way to specify
4552 the offset inside the original regno, so do so only for the lowpart.
4553 The information is used only by alias analysis, which cannot
4554 grok partial registers anyway. */
4556 if (subreg_lowpart_offset (outermode, innermode) == byte)
4557 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4558 return x;
4562 /* If we have a SUBREG of a register that we are replacing and we are
4563 replacing it with a MEM, make a new MEM and try replacing the
4564 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4565 or if we would be widening it. */
4567 if (MEM_P (op)
4568 && ! mode_dependent_address_p (XEXP (op, 0))
4569 /* Allow splitting of volatile memory references in case we don't
4570 have an instruction to move the whole thing. */
4571 && (! MEM_VOLATILE_P (op)
4572 || ! have_insn_for (SET, innermode))
4573 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4574 return adjust_address_nv (op, outermode, byte);
4576 /* Handle complex values represented as CONCAT
4577 of real and imaginary part. */
4578 if (GET_CODE (op) == CONCAT)
4580 unsigned int inner_size, final_offset;
4581 rtx part, res;
4583 inner_size = GET_MODE_UNIT_SIZE (innermode);
4584 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4585 final_offset = byte % inner_size;
4586 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4587 return NULL_RTX;
4589 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4590 if (res)
4591 return res;
4592 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4593 return gen_rtx_SUBREG (outermode, part, final_offset);
4594 return NULL_RTX;
4597 /* Optimize SUBREG truncations of zero and sign extended values. */
4598 if ((GET_CODE (op) == ZERO_EXTEND
4599 || GET_CODE (op) == SIGN_EXTEND)
4600 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4602 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4604 /* If we're requesting the lowpart of a zero or sign extension,
4605 there are three possibilities. If the outermode is the same
4606 as the origmode, we can omit both the extension and the subreg.
4607 If the outermode is not larger than the origmode, we can apply
4608 the truncation without the extension. Finally, if the outermode
4609 is larger than the origmode, but both are integer modes, we
4610 can just extend to the appropriate mode. */
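/* Editorial illustrations of the three cases, for a lowpart subreg
   (byte 0 on a little-endian target):

	(subreg:HI (zero_extend:SI (reg:HI X)) 0) -> (reg:HI X)
	(subreg:QI (zero_extend:SI (reg:HI X)) 0) -> (subreg:QI (reg:HI X) 0)
	(subreg:HI (zero_extend:SI (reg:QI X)) 0) -> (zero_extend:HI (reg:QI X))  */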
4611 if (bitpos == 0)
4613 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4614 if (outermode == origmode)
4615 return XEXP (op, 0);
4616 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4617 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4618 subreg_lowpart_offset (outermode,
4619 origmode));
4620 if (SCALAR_INT_MODE_P (outermode))
4621 return simplify_gen_unary (GET_CODE (op), outermode,
4622 XEXP (op, 0), origmode);
4625 /* A SUBREG resulting from a zero extension may fold to zero if
4626 it extracts higher bits than the ZERO_EXTEND's source bits. */
4627 if (GET_CODE (op) == ZERO_EXTEND
4628 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4629 return CONST0_RTX (outermode);
4632 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4633 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4634 the outer subreg is effectively a truncation to the original mode. */
4635 if ((GET_CODE (op) == LSHIFTRT
4636 || GET_CODE (op) == ASHIFTRT)
4637 && SCALAR_INT_MODE_P (outermode)
4638 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
4639 to avoid the possibility that an outer LSHIFTRT shifts by more
4640 than the sign extension's sign_bit_copies and introduces zeros
4641 into the high bits of the result. */
4642 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4643 && GET_CODE (XEXP (op, 1)) == CONST_INT
4644 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4645 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4646 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4647 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4648 return simplify_gen_binary (ASHIFTRT, outermode,
4649 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4651 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4652 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4653 the outer subreg is effectively a truncation to the original mode. */
4654 if ((GET_CODE (op) == LSHIFTRT
4655 || GET_CODE (op) == ASHIFTRT)
4656 && SCALAR_INT_MODE_P (outermode)
4657 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4658 && GET_CODE (XEXP (op, 1)) == CONST_INT
4659 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4660 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4661 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4662 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4663 return simplify_gen_binary (LSHIFTRT, outermode,
4664 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4666 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4667 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4668 the outer subreg is effectively a truncation to the original mode. */
4669 if (GET_CODE (op) == ASHIFT
4670 && SCALAR_INT_MODE_P (outermode)
4671 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4672 && GET_CODE (XEXP (op, 1)) == CONST_INT
4673 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4674 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4675 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4676 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4677 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4678 return simplify_gen_binary (ASHIFT, outermode,
4679 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4681 return NULL_RTX;
4684 /* Make a SUBREG operation or equivalent if it folds. */
4686 rtx
4687 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4688 enum machine_mode innermode, unsigned int byte)
4690 rtx newx;
4692 newx = simplify_subreg (outermode, op, innermode, byte);
4693 if (newx)
4694 return newx;
4696 if (GET_CODE (op) == SUBREG
4697 || GET_CODE (op) == CONCAT
4698 || GET_MODE (op) == VOIDmode)
4699 return NULL_RTX;
4701 if (validate_subreg (outermode, innermode, op, byte))
4702 return gen_rtx_SUBREG (outermode, op, byte);
4704 return NULL_RTX;
4707 /* Simplify X, an rtx expression.
4709 Return the simplified expression or NULL if no simplifications
4710 were possible.
4712 This is the preferred entry point into the simplification routines;
4713 however, we still allow passes to call the more specific routines.
4715 Right now GCC has three (yes, three) major bodies of RTL simplification
4716 code that need to be unified.
4718 1. fold_rtx in cse.c. This code uses various CSE specific
4719 information to aid in RTL simplification.
4721 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4722 it uses combine specific information to aid in RTL
4723 simplification.
4725 3. The routines in this file.
4728 Long term we want to only have one body of simplification code; to
4729 get to that state I recommend the following steps:
4731 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4732 which are not pass dependent state into these routines.
4734 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4735 use this routine whenever possible.
4737 3. Allow for pass dependent state to be provided to these
4738 routines and add simplifications based on the pass dependent
4739 state. Remove code from cse.c & combine.c that becomes
4740 redundant/dead.
4742 It will take time, but ultimately the compiler will be easier to
4743 maintain and improve. It's totally silly that when we add a
4744 simplification it needs to be added to 4 places (3 for RTL
4745 simplification and 1 for tree simplification). */
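/* Illustrative usage (an editorial sketch, not a call that appears in
   this file):

	rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
	rtx folded = simplify_rtx (sum);

   FOLDED should then be (const_int 5), folded by
   simplify_binary_operation; a NULL return would mean that no
   simplification was found.  */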
4747 rtx
4748 simplify_rtx (rtx x)
4750 enum rtx_code code = GET_CODE (x);
4751 enum machine_mode mode = GET_MODE (x);
4753 switch (GET_RTX_CLASS (code))
4755 case RTX_UNARY:
4756 return simplify_unary_operation (code, mode,
4757 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4758 case RTX_COMM_ARITH:
4759 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4760 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4762 /* Fall through.... */
4764 case RTX_BIN_ARITH:
4765 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4767 case RTX_TERNARY:
4768 case RTX_BITFIELD_OPS:
4769 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4770 XEXP (x, 0), XEXP (x, 1),
4771 XEXP (x, 2));
4773 case RTX_COMPARE:
4774 case RTX_COMM_COMPARE:
4775 return simplify_relational_operation (code, mode,
4776 ((GET_MODE (XEXP (x, 0))
4777 != VOIDmode)
4778 ? GET_MODE (XEXP (x, 0))
4779 : GET_MODE (XEXP (x, 1))),
4780 XEXP (x, 0),
4781 XEXP (x, 1));
4783 case RTX_EXTRA:
4784 if (code == SUBREG)
4785 return simplify_gen_subreg (mode, SUBREG_REG (x),
4786 GET_MODE (SUBREG_REG (x)),
4787 SUBREG_BYTE (x));
4788 break;
4790 case RTX_OBJ:
4791 if (code == LO_SUM)
4793 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4794 if (GET_CODE (XEXP (x, 0)) == HIGH
4795 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4796 return XEXP (x, 1);
4798 break;
4800 default:
4801 break;
4803 return NULL;