[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
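/* For example, if LOW has its most significant host-wide-int bit set,
   the cast to HOST_WIDE_INT is negative and the macro yields -1 (all
   ones); otherwise it yields 0.  The pair (low, HWI_SIGN_EXTEND (low))
   therefore represents LOW sign-extended to double width.  */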
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
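/* For example, for SImode this is true only for a constant whose low
   32 bits are 0x80000000; for modes wider than a host wide int the
   sign bit is carried in the high word of a CONST_DOUBLE whose low
   word must be zero.  */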
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
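/* For example, (plus (const_int 4) (reg)) is canonicalized here to
   (plus (reg) (const_int 4)) before the new rtx is built.  */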
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
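/* For example, a (mem (symbol_ref)) that addresses a pool entry
   holding the DFmode constant 1.0 is replaced by the CONST_DOUBLE
   for 1.0, so callers can fold it like any other constant operand.  */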
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 addr = XEXP (x, 0);
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
195 else
196 return c;
199 return x;
202 /* Return true if X is a MEM referencing the constant pool. */
204 bool
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 rtx tem;
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 rtx tem;
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
273 if (x == old_rtx)
274 return new_rtx;
276 switch (GET_RTX_CLASS (code))
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
330 break;
332 case RTX_OBJ:
333 if (code == MEM)
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
358 break;
360 default:
361 break;
363 return x;
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
373 rtx trueop, tem;
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
393 rtx temp;
395 switch (code)
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
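/* Both follow from the two's complement identity ~Y == -Y - 1:
   ~(X - 1) == -(X - 1) - 1 == -X, and ~(-X) == X - 1.  */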
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
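/* So (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)), with any surviving
   NOT placed on the first operand.  */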
503 break;
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
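/* These again use ~Y == -Y - 1: -(X + 1) == ~X and -(~X) == X + 1.  */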
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
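/* With A restricted to 0 or 1, (xor A 1) flips it to 1 or 0, and
   negating that gives -1 or 0, which is exactly A - 1.  */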
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
605 else if (STORE_FLAG_VALUE == -1)
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
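/* For STORE_FLAG_VALUE == 1, (neg (lt X 0)) is -1 when X is negative
   and 0 otherwise, which is what the arithmetic shift by isize - 1
   computes; for STORE_FLAG_VALUE == -1 it is 1 or 0, which the
   logical shift computes.  */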
616 break;
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes; we just have to apply a different definition of
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe.
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
692 (float_truncate:DF (float_extend:XF foo:SF))
693 = (float_extend:DF foo:SF). */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
725 is (float_truncate:SF x). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
736 /* (float_extend (float_extend x)) is (float_extend x)
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.  */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
751 break;
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
778 break;
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
858 default:
859 break;
862 return 0;
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
872 unsigned int width = GET_MODE_BITSIZE (mode);
874 if (code == VEC_DUPLICATE)
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
907 return gen_rtx_CONST_VECTOR (mode, v);
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
931 return gen_rtx_CONST_VECTOR (mode, v);
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
965 if (op_mode == VOIDmode)
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
973 ;
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
988 switch (code)
990 case NOT:
991 val = ~ arg0;
992 break;
994 case NEG:
995 val = - arg0;
996 break;
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
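/* For example, arg0 == 12 (binary 1100): arg0 & -arg0 == 4,
   exact_log2 (4) == 2, so val == 3, the 1-based index of the lowest
   set bit; arg0 == 0 gives exact_log2 (0) == -1 and hence val == 0.  */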
1007 break;
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1012 ;
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1015 break;
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
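/* Each iteration clears the lowest set bit, so the loop runs once
   per set bit; e.g. arg0 == 0xf0 takes four iterations.  */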
1035 break;
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
1045 case TRUNCATE:
1046 val = arg0;
1047 break;
1049 case ZERO_EXTEND:
1050 /* When zero-extending a CONST_INT, we need to know its
1051 original mode. */
1052 gcc_assert (op_mode != VOIDmode);
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1063 else
1064 return 0;
1065 break;
1067 case SIGN_EXTEND:
1068 if (op_mode == VOIDmode)
1069 op_mode = mode;
1070 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1072 /* If we were really extending the mode,
1073 we would have to distinguish between zero-extension
1074 and sign-extension. */
1075 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1076 val = arg0;
1078 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1081 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1082 if (val
1083 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1084 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1086 else
1087 return 0;
1088 break;
1090 case SQRT:
1091 case FLOAT_EXTEND:
1092 case FLOAT_TRUNCATE:
1093 case SS_TRUNCATE:
1094 case US_TRUNCATE:
1095 case SS_NEG:
1096 return 0;
1098 default:
1099 gcc_unreachable ();
1102 return gen_int_mode (val, mode);
1105 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1106 for a DImode operation on a CONST_INT. */
1107 else if (GET_MODE (op) == VOIDmode
1108 && width <= HOST_BITS_PER_WIDE_INT * 2
1109 && (GET_CODE (op) == CONST_DOUBLE
1110 || GET_CODE (op) == CONST_INT))
1112 unsigned HOST_WIDE_INT l1, lv;
1113 HOST_WIDE_INT h1, hv;
1115 if (GET_CODE (op) == CONST_DOUBLE)
1116 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1117 else
1118 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1120 switch (code)
1122 case NOT:
1123 lv = ~ l1;
1124 hv = ~ h1;
1125 break;
1127 case NEG:
1128 neg_double (l1, h1, &lv, &hv);
1129 break;
1131 case ABS:
1132 if (h1 < 0)
1133 neg_double (l1, h1, &lv, &hv);
1134 else
1135 lv = l1, hv = h1;
1136 break;
1138 case FFS:
1139 hv = 0;
1140 if (l1 == 0)
1142 if (h1 == 0)
1143 lv = 0;
1144 else
1145 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1147 else
1148 lv = exact_log2 (l1 & -l1) + 1;
1149 break;
1151 case CLZ:
1152 hv = 0;
1153 if (h1 != 0)
1154 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1155 - HOST_BITS_PER_WIDE_INT;
1156 else if (l1 != 0)
1157 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1158 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1159 lv = GET_MODE_BITSIZE (mode);
1160 break;
1162 case CTZ:
1163 hv = 0;
1164 if (l1 != 0)
1165 lv = exact_log2 (l1 & -l1);
1166 else if (h1 != 0)
1167 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1168 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1169 lv = GET_MODE_BITSIZE (mode);
1170 break;
1172 case POPCOUNT:
1173 hv = 0;
1174 lv = 0;
1175 while (l1)
1176 lv++, l1 &= l1 - 1;
1177 while (h1)
1178 lv++, h1 &= h1 - 1;
1179 break;
1181 case PARITY:
1182 hv = 0;
1183 lv = 0;
1184 while (l1)
1185 lv++, l1 &= l1 - 1;
1186 while (h1)
1187 lv++, h1 &= h1 - 1;
1188 lv &= 1;
1189 break;
1191 case TRUNCATE:
1192 /* This is just a change-of-mode, so do nothing. */
1193 lv = l1, hv = h1;
1194 break;
1196 case ZERO_EXTEND:
1197 gcc_assert (op_mode != VOIDmode);
1199 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1200 return 0;
1202 hv = 0;
1203 lv = l1 & GET_MODE_MASK (op_mode);
1204 break;
1206 case SIGN_EXTEND:
1207 if (op_mode == VOIDmode
1208 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1209 return 0;
1210 else
1212 lv = l1 & GET_MODE_MASK (op_mode);
1213 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1214 && (lv & ((HOST_WIDE_INT) 1
1215 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1216 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1218 hv = HWI_SIGN_EXTEND (lv);
1220 break;
1222 case SQRT:
1223 return 0;
1225 default:
1226 return 0;
1229 return immed_double_const (lv, hv, mode);
1232 else if (GET_CODE (op) == CONST_DOUBLE
1233 && SCALAR_FLOAT_MODE_P (mode))
1235 REAL_VALUE_TYPE d, t;
1236 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1238 switch (code)
1240 case SQRT:
1241 if (HONOR_SNANS (mode) && real_isnan (&d))
1242 return 0;
1243 real_sqrt (&t, mode, &d);
1244 d = t;
1245 break;
1246 case ABS:
1247 d = REAL_VALUE_ABS (d);
1248 break;
1249 case NEG:
1250 d = REAL_VALUE_NEGATE (d);
1251 break;
1252 case FLOAT_TRUNCATE:
1253 d = real_value_truncate (mode, d);
1254 break;
1255 case FLOAT_EXTEND:
1256 /* All this does is change the mode. */
1257 break;
1258 case FIX:
1259 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1260 break;
1261 case NOT:
1263 long tmp[4];
1264 int i;
1266 real_to_target (tmp, &d, GET_MODE (op));
1267 for (i = 0; i < 4; i++)
1268 tmp[i] = ~tmp[i];
1269 real_from_target (&d, tmp, mode);
1270 break;
1272 default:
1273 gcc_unreachable ();
1275 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1278 else if (GET_CODE (op) == CONST_DOUBLE
1279 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1280 && GET_MODE_CLASS (mode) == MODE_INT
1281 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1283 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1284 operators are intentionally left unspecified (to ease implementation
1285 by target backends), for consistency, this routine implements the
1286 same semantics for constant folding as used by the middle-end. */
1288 /* This was formerly used only for non-IEEE float.
1289 eggert@twinsun.com says it is safe for IEEE also. */
1290 HOST_WIDE_INT xh, xl, th, tl;
1291 REAL_VALUE_TYPE x, t;
1292 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1293 switch (code)
1295 case FIX:
1296 if (REAL_VALUE_ISNAN (x))
1297 return const0_rtx;
1299 /* Test against the signed upper bound. */
1300 if (width > HOST_BITS_PER_WIDE_INT)
1302 th = ((unsigned HOST_WIDE_INT) 1
1303 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1304 tl = -1;
1306 else
1308 th = 0;
1309 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1311 real_from_integer (&t, VOIDmode, tl, th, 0);
1312 if (REAL_VALUES_LESS (t, x))
1314 xh = th;
1315 xl = tl;
1316 break;
1319 /* Test against the signed lower bound. */
1320 if (width > HOST_BITS_PER_WIDE_INT)
1322 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1323 tl = 0;
1325 else
1327 th = -1;
1328 tl = (HOST_WIDE_INT) -1 << (width - 1);
1330 real_from_integer (&t, VOIDmode, tl, th, 0);
1331 if (REAL_VALUES_LESS (x, t))
1333 xh = th;
1334 xl = tl;
1335 break;
1337 REAL_VALUE_TO_INT (&xl, &xh, x);
1338 break;
1340 case UNSIGNED_FIX:
1341 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1342 return const0_rtx;
1344 /* Test against the unsigned upper bound. */
1345 if (width == 2*HOST_BITS_PER_WIDE_INT)
1347 th = -1;
1348 tl = -1;
1350 else if (width >= HOST_BITS_PER_WIDE_INT)
1352 th = ((unsigned HOST_WIDE_INT) 1
1353 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1354 tl = -1;
1356 else
1358 th = 0;
1359 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1361 real_from_integer (&t, VOIDmode, tl, th, 1);
1362 if (REAL_VALUES_LESS (t, x))
1364 xh = th;
1365 xl = tl;
1366 break;
1369 REAL_VALUE_TO_INT (&xl, &xh, x);
1370 break;
1372 default:
1373 gcc_unreachable ();
1375 return immed_double_const (xl, xh, mode);
1378 return NULL_RTX;
1381 /* Subroutine of simplify_binary_operation to simplify a commutative,
1382 associative binary operation CODE with result mode MODE, operating
1383 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1384 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1385 canonicalization is possible. */
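/* For example, (and (reg X) (and (reg Y) (const_int 255))) is
   linearized to the left and the constant moved outward, yielding
   an (and (and ...) (const_int 255)) in which a second constant can
   later be combined with the 255.  */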
1387 static rtx
1388 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1389 rtx op0, rtx op1)
1391 rtx tem;
1393 /* Linearize the operator to the left. */
1394 if (GET_CODE (op1) == code)
1396 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1397 if (GET_CODE (op0) == code)
1399 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1400 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1403 /* "a op (b op c)" becomes "(b op c) op a". */
1404 if (! swap_commutative_operands_p (op1, op0))
1405 return simplify_gen_binary (code, mode, op1, op0);
1407 tem = op0;
1408 op0 = op1;
1409 op1 = tem;
1412 if (GET_CODE (op0) == code)
1414 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1415 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1417 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1418 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1421 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1422 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1423 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1424 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1425 if (tem != 0)
1426 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1428 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1432 if (tem != 0)
1433 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1436 return 0;
1440 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1441 and OP1. Return 0 if no simplification is possible.
1443 Don't use this for relational operations such as EQ or LT.
1444 Use simplify_relational_operation instead. */
1446 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1447 rtx op0, rtx op1)
1449 rtx trueop0, trueop1;
1450 rtx tem;
1452 /* Relational operations don't work here. We must know the mode
1453 of the operands in order to do the comparison correctly.
1454 Assuming a full word can give incorrect results.
1455 Consider comparing 128 with -128 in QImode. */
1456 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1457 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1459 /* Make sure the constant is second. */
1460 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1461 && swap_commutative_operands_p (op0, op1))
1463 tem = op0, op0 = op1, op1 = tem;
1466 trueop0 = avoid_constant_pool_reference (op0);
1467 trueop1 = avoid_constant_pool_reference (op1);
1469 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1470 if (tem)
1471 return tem;
1472 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1475 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1476 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1477 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1478 actual constants. */
1480 static rtx
1481 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1482 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1484 rtx tem, reversed, opleft, opright;
1485 HOST_WIDE_INT val;
1486 unsigned int width = GET_MODE_BITSIZE (mode);
1488 /* Even if we can't compute a constant result,
1489 there are some cases worth simplifying. */
1491 switch (code)
1493 case PLUS:
1494 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1495 when x is NaN, infinite, or finite and nonzero. They aren't
1496 when x is -0 and the rounding mode is not towards -infinity,
1497 since (-0) + 0 is then 0. */
1498 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1499 return op0;
1501 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1502 transformations are safe even for IEEE. */
1503 if (GET_CODE (op0) == NEG)
1504 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1505 else if (GET_CODE (op1) == NEG)
1506 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1508 /* (~a) + 1 -> -a */
1509 if (INTEGRAL_MODE_P (mode)
1510 && GET_CODE (op0) == NOT
1511 && trueop1 == const1_rtx)
1512 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1514 /* Handle both-operands-constant cases. We can only add
1515 CONST_INTs to constants since the sum of relocatable symbols
1516 can't be handled by most assemblers. Don't add CONST_INT
1517 to CONST_INT since overflow won't be computed properly if wider
1518 than HOST_BITS_PER_WIDE_INT. */
1520 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1521 && GET_CODE (op1) == CONST_INT)
1522 return plus_constant (op0, INTVAL (op1));
1523 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1524 && GET_CODE (op0) == CONST_INT)
1525 return plus_constant (op1, INTVAL (op0));
1527 /* See if this is something like X * C - X or vice versa or
1528 if the multiplication is written as a shift. If so, we can
1529 distribute and make a new multiply, shift, or maybe just
1530 have X (if C is 2 in the example above). But don't make
1531 something more expensive than we had before. */
1533 if (SCALAR_INT_MODE_P (mode))
1535 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1536 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1537 rtx lhs = op0, rhs = op1;
1539 if (GET_CODE (lhs) == NEG)
1541 coeff0l = -1;
1542 coeff0h = -1;
1543 lhs = XEXP (lhs, 0);
1545 else if (GET_CODE (lhs) == MULT
1546 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1548 coeff0l = INTVAL (XEXP (lhs, 1));
1549 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1550 lhs = XEXP (lhs, 0);
1552 else if (GET_CODE (lhs) == ASHIFT
1553 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1554 && INTVAL (XEXP (lhs, 1)) >= 0
1555 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1557 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1558 coeff0h = 0;
1559 lhs = XEXP (lhs, 0);
1562 if (GET_CODE (rhs) == NEG)
1564 coeff1l = -1;
1565 coeff1h = -1;
1566 rhs = XEXP (rhs, 0);
1568 else if (GET_CODE (rhs) == MULT
1569 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1571 coeff1l = INTVAL (XEXP (rhs, 1));
1572 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1573 rhs = XEXP (rhs, 0);
1575 else if (GET_CODE (rhs) == ASHIFT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1577 && INTVAL (XEXP (rhs, 1)) >= 0
1578 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1580 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1581 coeff1h = 0;
1582 rhs = XEXP (rhs, 0);
1585 if (rtx_equal_p (lhs, rhs))
1587 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1588 rtx coeff;
1589 unsigned HOST_WIDE_INT l;
1590 HOST_WIDE_INT h;
1592 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1593 coeff = immed_double_const (l, h, mode);
1595 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1596 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1597 ? tem : 0;
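/* For example, (plus (mult (reg X) (const_int 2)) (reg X)) has
   coefficients 2 and 1, so it becomes (mult (reg X) (const_int 3))
   provided that is no more expensive than the original.  */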
1601 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1602 if ((GET_CODE (op1) == CONST_INT
1603 || GET_CODE (op1) == CONST_DOUBLE)
1604 && GET_CODE (op0) == XOR
1605 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1606 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1607 && mode_signbit_p (mode, op1))
1608 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1609 simplify_gen_binary (XOR, mode, op1,
1610 XEXP (op0, 1)));
1612 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1613 if (GET_CODE (op0) == MULT
1614 && GET_CODE (XEXP (op0, 0)) == NEG)
1616 rtx in1, in2;
1618 in1 = XEXP (XEXP (op0, 0), 0);
1619 in2 = XEXP (op0, 1);
1620 return simplify_gen_binary (MINUS, mode, op1,
1621 simplify_gen_binary (MULT, mode,
1622 in1, in2));
1625 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1626 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1627 is 1. */
1628 if (COMPARISON_P (op0)
1629 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1630 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1631 && (reversed = reversed_comparison (op0, mode)))
1632 return
1633 simplify_gen_unary (NEG, mode, reversed, mode);
1635 /* If one of the operands is a PLUS or a MINUS, see if we can
1636 simplify this by the associative law.
1637 Don't use the associative law for floating point.
1638 The inaccuracy makes it nonassociative,
1639 and subtle programs can break if operations are associated. */
1641 if (INTEGRAL_MODE_P (mode)
1642 && (plus_minus_operand_p (op0)
1643 || plus_minus_operand_p (op1))
1644 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1645 return tem;
1647 /* Reassociate floating point addition only when the user
1648 specifies unsafe math optimizations. */
1649 if (FLOAT_MODE_P (mode)
1650 && flag_unsafe_math_optimizations)
1652 tem = simplify_associative_operation (code, mode, op0, op1);
1653 if (tem)
1654 return tem;
1656 break;
1658 case COMPARE:
1659 #ifdef HAVE_cc0
1660 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1661 using cc0, in which case we want to leave it as a COMPARE
1662 so we can distinguish it from a register-register-copy.
1664 In IEEE floating point, x-0 is not the same as x. */
1666 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1667 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1668 && trueop1 == CONST0_RTX (mode))
1669 return op0;
1670 #endif
1672 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1673 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1674 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1675 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1677 rtx xop00 = XEXP (op0, 0);
1678 rtx xop10 = XEXP (op1, 0);
1680 #ifdef HAVE_cc0
1681 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1682 #else
1683 if (REG_P (xop00) && REG_P (xop10)
1684 && GET_MODE (xop00) == GET_MODE (xop10)
1685 && REGNO (xop00) == REGNO (xop10)
1686 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1687 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1688 #endif
1689 return xop00;
1691 break;
1693 case MINUS:
1694 /* We can't assume x-x is 0 even with non-IEEE floating point,
1695 but since it is zero except in very strange circumstances, we
1696 will treat it as zero with -funsafe-math-optimizations. */
1697 if (rtx_equal_p (trueop0, trueop1)
1698 && ! side_effects_p (op0)
1699 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1700 return CONST0_RTX (mode);
1702 /* Change subtraction from zero into negation. (0 - x) is the
1703 same as -x when x is NaN, infinite, or finite and nonzero.
1704 But if the mode has signed zeros, and does not round towards
1705 -infinity, then 0 - 0 is 0, not -0. */
1706 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1707 return simplify_gen_unary (NEG, mode, op1, mode);
1709 /* (-1 - a) is ~a. */
1710 if (trueop0 == constm1_rtx)
1711 return simplify_gen_unary (NOT, mode, op1, mode);
1713 /* Subtracting 0 has no effect unless the mode has signed zeros
1714 and supports rounding towards -infinity. In such a case,
1715 0 - 0 is -0. */
1716 if (!(HONOR_SIGNED_ZEROS (mode)
1717 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1718 && trueop1 == CONST0_RTX (mode))
1719 return op0;
1721 /* See if this is something like X * C - X or vice versa or
1722 if the multiplication is written as a shift. If so, we can
1723 distribute and make a new multiply, shift, or maybe just
1724 have X (if C is 2 in the example above). But don't make
1725 something more expensive than we had before. */
1727 if (SCALAR_INT_MODE_P (mode))
1729 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1730 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1731 rtx lhs = op0, rhs = op1;
1733 if (GET_CODE (lhs) == NEG)
1735 coeff0l = -1;
1736 coeff0h = -1;
1737 lhs = XEXP (lhs, 0);
1739 else if (GET_CODE (lhs) == MULT
1740 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1742 coeff0l = INTVAL (XEXP (lhs, 1));
1743 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1744 lhs = XEXP (lhs, 0);
1746 else if (GET_CODE (lhs) == ASHIFT
1747 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1748 && INTVAL (XEXP (lhs, 1)) >= 0
1749 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1751 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1752 coeff0h = 0;
1753 lhs = XEXP (lhs, 0);
1756 if (GET_CODE (rhs) == NEG)
1758 negcoeff1l = 1;
1759 negcoeff1h = 0;
1760 rhs = XEXP (rhs, 0);
1762 else if (GET_CODE (rhs) == MULT
1763 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1765 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1766 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1767 rhs = XEXP (rhs, 0);
1769 else if (GET_CODE (rhs) == ASHIFT
1770 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1771 && INTVAL (XEXP (rhs, 1)) >= 0
1772 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1774 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1775 negcoeff1h = -1;
1776 rhs = XEXP (rhs, 0);
1779 if (rtx_equal_p (lhs, rhs))
1781 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1782 rtx coeff;
1783 unsigned HOST_WIDE_INT l;
1784 HOST_WIDE_INT h;
1786 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1787 coeff = immed_double_const (l, h, mode);
1789 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1790 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1791 ? tem : 0;
1795 /* (a - (-b)) -> (a + b). True even for IEEE. */
1796 if (GET_CODE (op1) == NEG)
1797 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1799 /* (-x - c) may be simplified as (-c - x). */
1800 if (GET_CODE (op0) == NEG
1801 && (GET_CODE (op1) == CONST_INT
1802 || GET_CODE (op1) == CONST_DOUBLE))
1804 tem = simplify_unary_operation (NEG, mode, op1, mode);
1805 if (tem)
1806 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1809 /* Don't let a relocatable value get a negative coeff. */
1810 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1811 return simplify_gen_binary (PLUS, mode,
1812 op0,
1813 neg_const_int (mode, op1));
1815 /* (x - (x & y)) -> (x & ~y) */
1816 if (GET_CODE (op1) == AND)
1818 if (rtx_equal_p (op0, XEXP (op1, 0)))
1820 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1821 GET_MODE (XEXP (op1, 1)));
1822 return simplify_gen_binary (AND, mode, op0, tem);
1824 if (rtx_equal_p (op0, XEXP (op1, 1)))
1826 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1827 GET_MODE (XEXP (op1, 0)));
1828 return simplify_gen_binary (AND, mode, op0, tem);
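/* This holds because every set bit of (x & y) is also set in x, so
   the subtraction borrows nothing and simply clears those bits,
   which is x & ~y.  */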
1832 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1833 by reversing the comparison code if valid. */
1834 if (STORE_FLAG_VALUE == 1
1835 && trueop0 == const1_rtx
1836 && COMPARISON_P (op1)
1837 && (reversed = reversed_comparison (op1, mode)))
1838 return reversed;
1840 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1841 if (GET_CODE (op1) == MULT
1842 && GET_CODE (XEXP (op1, 0)) == NEG)
1844 rtx in1, in2;
1846 in1 = XEXP (XEXP (op1, 0), 0);
1847 in2 = XEXP (op1, 1);
1848 return simplify_gen_binary (PLUS, mode,
1849 simplify_gen_binary (MULT, mode,
1850 in1, in2),
1851 op0);
1854 /* Canonicalize (minus (neg A) (mult B C)) to
1855 (minus (mult (neg B) C) A). */
1856 if (GET_CODE (op1) == MULT
1857 && GET_CODE (op0) == NEG)
1859 rtx in1, in2;
1861 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1862 in2 = XEXP (op1, 1);
1863 return simplify_gen_binary (MINUS, mode,
1864 simplify_gen_binary (MULT, mode,
1865 in1, in2),
1866 XEXP (op0, 0));
1869 /* If one of the operands is a PLUS or a MINUS, see if we can
1870 simplify this by the associative law. This will, for example,
1871 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1872 Don't use the associative law for floating point.
1873 The inaccuracy makes it nonassociative,
1874 and subtle programs can break if operations are associated. */
1876 if (INTEGRAL_MODE_P (mode)
1877 && (plus_minus_operand_p (op0)
1878 || plus_minus_operand_p (op1))
1879 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1880 return tem;
1881 break;
1883 case MULT:
1884 if (trueop1 == constm1_rtx)
1885 return simplify_gen_unary (NEG, mode, op0, mode);
1887 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1888 x is NaN, since x * 0 is then also NaN. Nor is it valid
1889 when the mode has signed zeros, since multiplying a negative
1890 number by 0 will give -0, not 0. */
1891 if (!HONOR_NANS (mode)
1892 && !HONOR_SIGNED_ZEROS (mode)
1893 && trueop1 == CONST0_RTX (mode)
1894 && ! side_effects_p (op0))
1895 return op1;
1897 /* In IEEE floating point, x*1 is not equivalent to x for
1898 signalling NaNs. */
1899 if (!HONOR_SNANS (mode)
1900 && trueop1 == CONST1_RTX (mode))
1901 return op0;
1903 /* Convert multiply by constant power of two into shift unless
1904 we are still generating RTL. This test is a kludge. */
1905 if (GET_CODE (trueop1) == CONST_INT
1906 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1907 /* If the mode is larger than the host word size, and the
1908 uppermost bit is set, then this isn't a power of two due
1909 to implicit sign extension. */
1910 && (width <= HOST_BITS_PER_WIDE_INT
1911 || val != HOST_BITS_PER_WIDE_INT - 1))
1912 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
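/* Editorial note, not part of the original source: as an example of the
transformation above, with trueop1 == (const_int 8) exact_log2 returns 3,
so (mult X (const_int 8)) is rewritten as (ashift X (const_int 3)),
i.e. x * 8 becomes x << 3.  */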
1914 /* Likewise for multipliers wider than a word. */
1915 if (GET_CODE (trueop1) == CONST_DOUBLE
1916 && (GET_MODE (trueop1) == VOIDmode
1917 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1918 && GET_MODE (op0) == mode
1919 && CONST_DOUBLE_LOW (trueop1) == 0
1920 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1921 return simplify_gen_binary (ASHIFT, mode, op0,
1922 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1924 /* x*2 is x+x and x*(-1) is -x */
1925 if (GET_CODE (trueop1) == CONST_DOUBLE
1926 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1927 && GET_MODE (op0) == mode)
1929 REAL_VALUE_TYPE d;
1930 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1932 if (REAL_VALUES_EQUAL (d, dconst2))
1933 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1935 if (!HONOR_SNANS (mode)
1936 && REAL_VALUES_EQUAL (d, dconstm1))
1937 return simplify_gen_unary (NEG, mode, op0, mode);
1940 /* Optimize -x * -x as x * x. */
1941 if (FLOAT_MODE_P (mode)
1942 && GET_CODE (op0) == NEG
1943 && GET_CODE (op1) == NEG
1944 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1945 && !side_effects_p (XEXP (op0, 0)))
1946 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1948 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1949 if (SCALAR_FLOAT_MODE_P (mode)
1950 && GET_CODE (op0) == ABS
1951 && GET_CODE (op1) == ABS
1952 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1953 && !side_effects_p (XEXP (op0, 0)))
1954 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1956 /* Reassociate multiplication, but for floating point MULTs
1957 only when the user specifies unsafe math optimizations. */
1958 if (! FLOAT_MODE_P (mode)
1959 || flag_unsafe_math_optimizations)
1961 tem = simplify_associative_operation (code, mode, op0, op1);
1962 if (tem)
1963 return tem;
1965 break;
1967 case IOR:
1968 if (trueop1 == const0_rtx)
1969 return op0;
1970 if (GET_CODE (trueop1) == CONST_INT
1971 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1972 == GET_MODE_MASK (mode)))
1973 return op1;
1974 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1975 return op0;
1976 /* A | (~A) -> -1 */
1977 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1978 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1979 && ! side_effects_p (op0)
1980 && SCALAR_INT_MODE_P (mode))
1981 return constm1_rtx;
1983 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1984 if (GET_CODE (op1) == CONST_INT
1985 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1986 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1987 return op1;
1989 /* Convert (A & B) | A to A. */
1990 if (GET_CODE (op0) == AND
1991 && (rtx_equal_p (XEXP (op0, 0), op1)
1992 || rtx_equal_p (XEXP (op0, 1), op1))
1993 && ! side_effects_p (XEXP (op0, 0))
1994 && ! side_effects_p (XEXP (op0, 1)))
1995 return op1;
1997 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1998 mode size to (rotate A CX). */
2000 if (GET_CODE (op1) == ASHIFT
2001 || GET_CODE (op1) == SUBREG)
2003 opleft = op1;
2004 opright = op0;
2006 else
2008 opright = op1;
2009 opleft = op0;
2012 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2013 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2014 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2015 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2016 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2017 == GET_MODE_BITSIZE (mode)))
2018 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
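/* Editorial note, not part of the original source: for example, in a
32-bit mode (ior (ashift X (const_int 8)) (lshiftrt X (const_int 24)))
satisfies 8 + 24 == GET_MODE_BITSIZE (mode) and is rewritten as
(rotate X (const_int 8)), a rotate left by 8 bits.  */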
2020 /* Same, but for ashift that has been "simplified" to a wider mode
2021 by simplify_shift_const. */
2023 if (GET_CODE (opleft) == SUBREG
2024 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2025 && GET_CODE (opright) == LSHIFTRT
2026 && GET_CODE (XEXP (opright, 0)) == SUBREG
2027 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2028 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2029 && (GET_MODE_SIZE (GET_MODE (opleft))
2030 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2031 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2032 SUBREG_REG (XEXP (opright, 0)))
2033 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2034 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2035 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2036 == GET_MODE_BITSIZE (mode)))
2037 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2038 XEXP (SUBREG_REG (opleft), 1));
2040 /* If we have (ior (and X C1) C2), simplify this by making
2041 C1 as small as possible if C1 actually changes. */
2042 if (GET_CODE (op1) == CONST_INT
2043 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2044 || INTVAL (op1) > 0)
2045 && GET_CODE (op0) == AND
2046 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2047 && GET_CODE (op1) == CONST_INT
2048 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2049 return simplify_gen_binary (IOR, mode,
2050 simplify_gen_binary
2051 (AND, mode, XEXP (op0, 0),
2052 GEN_INT (INTVAL (XEXP (op0, 1))
2053 & ~INTVAL (op1))),
2054 op1);
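/* Editorial note, not part of the original source: as an example of the
rewrite above, (ior (and X (const_int 15)) (const_int 6)) becomes
(ior (and X (const_int 9)) (const_int 6)), since the bits of C1 that are
already set in C2 are redundant and 15 & ~6 == 9.  */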
2056 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2057 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2058 the PLUS does not affect any of the bits in OP1: then we can do
2059 the IOR as a PLUS and we can associate. This is valid if OP1
2060 can be safely shifted left C bits. */
2061 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2062 && GET_CODE (XEXP (op0, 0)) == PLUS
2063 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2064 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2065 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2067 int count = INTVAL (XEXP (op0, 1));
2068 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2070 if (mask >> count == INTVAL (trueop1)
2071 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2072 return simplify_gen_binary (ASHIFTRT, mode,
2073 plus_constant (XEXP (op0, 0), mask),
2074 XEXP (op0, 1));
2077 tem = simplify_associative_operation (code, mode, op0, op1);
2078 if (tem)
2079 return tem;
2080 break;
2082 case XOR:
2083 if (trueop1 == const0_rtx)
2084 return op0;
2085 if (GET_CODE (trueop1) == CONST_INT
2086 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2087 == GET_MODE_MASK (mode)))
2088 return simplify_gen_unary (NOT, mode, op0, mode);
2089 if (rtx_equal_p (trueop0, trueop1)
2090 && ! side_effects_p (op0)
2091 && GET_MODE_CLASS (mode) != MODE_CC)
2092 return CONST0_RTX (mode);
2094 /* Canonicalize XOR of the most significant bit to PLUS. */
2095 if ((GET_CODE (op1) == CONST_INT
2096 || GET_CODE (op1) == CONST_DOUBLE)
2097 && mode_signbit_p (mode, op1))
2098 return simplify_gen_binary (PLUS, mode, op0, op1);
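/* Editorial note, not part of the original source: e.g. in QImode the
sign bit is 0x80, and x ^ 0x80 equals (x + 0x80) modulo 256: adding the
most significant bit flips only that bit and the carry out is discarded,
so the XOR is canonicalized to a PLUS.  */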
2099 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2100 if ((GET_CODE (op1) == CONST_INT
2101 || GET_CODE (op1) == CONST_DOUBLE)
2102 && GET_CODE (op0) == PLUS
2103 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2104 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2105 && mode_signbit_p (mode, XEXP (op0, 1)))
2106 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2107 simplify_gen_binary (XOR, mode, op1,
2108 XEXP (op0, 1)));
2110 /* If we are XORing two things that have no bits in common,
2111 convert them into an IOR. This helps to detect rotation encoded
2112 using those methods and possibly other simplifications. */
2114 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2115 && (nonzero_bits (op0, mode)
2116 & nonzero_bits (op1, mode)) == 0)
2117 return (simplify_gen_binary (IOR, mode, op0, op1));
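/* Editorial note, not part of the original source: if nonzero_bits shows
that op0 can only set bits 0x0f and op1 can only set bits 0xf0, then no
bit position can be set in both operands, so XOR and IOR compute the same
value and the IOR form is preferred.  */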
2119 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2120 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2121 (NOT y). */
2123 int num_negated = 0;
2125 if (GET_CODE (op0) == NOT)
2126 num_negated++, op0 = XEXP (op0, 0);
2127 if (GET_CODE (op1) == NOT)
2128 num_negated++, op1 = XEXP (op1, 0);
2130 if (num_negated == 2)
2131 return simplify_gen_binary (XOR, mode, op0, op1);
2132 else if (num_negated == 1)
2133 return simplify_gen_unary (NOT, mode,
2134 simplify_gen_binary (XOR, mode, op0, op1),
2135 mode);
2138 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2139 correspond to a machine insn or result in further simplifications
2140 if B is a constant. */
2142 if (GET_CODE (op0) == AND
2143 && rtx_equal_p (XEXP (op0, 1), op1)
2144 && ! side_effects_p (op1))
2145 return simplify_gen_binary (AND, mode,
2146 simplify_gen_unary (NOT, mode,
2147 XEXP (op0, 0), mode),
2148 op1);
2150 else if (GET_CODE (op0) == AND
2151 && rtx_equal_p (XEXP (op0, 0), op1)
2152 && ! side_effects_p (op1))
2153 return simplify_gen_binary (AND, mode,
2154 simplify_gen_unary (NOT, mode,
2155 XEXP (op0, 1), mode),
2156 op1);
2158 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2159 comparison if STORE_FLAG_VALUE is 1. */
2160 if (STORE_FLAG_VALUE == 1
2161 && trueop1 == const1_rtx
2162 && COMPARISON_P (op0)
2163 && (reversed = reversed_comparison (op0, mode)))
2164 return reversed;
2166 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2167 is (lt foo (const_int 0)), so we can perform the above
2168 simplification if STORE_FLAG_VALUE is 1. */
2170 if (STORE_FLAG_VALUE == 1
2171 && trueop1 == const1_rtx
2172 && GET_CODE (op0) == LSHIFTRT
2173 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2174 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2175 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
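/* Editorial note, not part of the original source: e.g. in a 32-bit mode
(lshiftrt X (const_int 31)) is 1 exactly when X is negative, so XORing it
with (const_int 1) yields 1 exactly when X >= 0, which is (ge X 0) when
STORE_FLAG_VALUE is 1.  */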
2177 /* (xor (comparison foo bar) (const_int sign-bit))
2178 when STORE_FLAG_VALUE is the sign bit. */
2179 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2180 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2181 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2182 && trueop1 == const_true_rtx
2183 && COMPARISON_P (op0)
2184 && (reversed = reversed_comparison (op0, mode)))
2185 return reversed;
2189 tem = simplify_associative_operation (code, mode, op0, op1);
2190 if (tem)
2191 return tem;
2192 break;
2194 case AND:
2195 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2196 return trueop1;
2197 /* If we are turning off bits already known off in OP0, we need
2198 not do an AND. */
2199 if (GET_CODE (trueop1) == CONST_INT
2200 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2201 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2202 return op0;
2203 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2204 && GET_MODE_CLASS (mode) != MODE_CC)
2205 return op0;
2206 /* A & (~A) -> 0 */
2207 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2208 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2209 && ! side_effects_p (op0)
2210 && GET_MODE_CLASS (mode) != MODE_CC)
2211 return CONST0_RTX (mode);
2213 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2214 there are no nonzero bits of C outside of X's mode. */
2215 if ((GET_CODE (op0) == SIGN_EXTEND
2216 || GET_CODE (op0) == ZERO_EXTEND)
2217 && GET_CODE (trueop1) == CONST_INT
2218 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2219 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2220 & INTVAL (trueop1)) == 0)
2222 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2223 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2224 gen_int_mode (INTVAL (trueop1),
2225 imode));
2226 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
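/* Editorial note, not part of the original source: e.g.
(and (sign_extend:SI (reg:QI R)) (const_int 0x7f)) has no nonzero mask
bits outside QImode, so it becomes
(zero_extend:SI (and:QI (reg:QI R) (const_int 0x7f))).  */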
2229 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2230 insn (and may simplify more). */
2231 if (GET_CODE (op0) == XOR
2232 && rtx_equal_p (XEXP (op0, 0), op1)
2233 && ! side_effects_p (op1))
2234 return simplify_gen_binary (AND, mode,
2235 simplify_gen_unary (NOT, mode,
2236 XEXP (op0, 1), mode),
2237 op1);
2239 if (GET_CODE (op0) == XOR
2240 && rtx_equal_p (XEXP (op0, 1), op1)
2241 && ! side_effects_p (op1))
2242 return simplify_gen_binary (AND, mode,
2243 simplify_gen_unary (NOT, mode,
2244 XEXP (op0, 0), mode),
2245 op1);
2247 /* Similarly for (~(A ^ B)) & A. */
2248 if (GET_CODE (op0) == NOT
2249 && GET_CODE (XEXP (op0, 0)) == XOR
2250 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2254 if (GET_CODE (op0) == NOT
2255 && GET_CODE (XEXP (op0, 0)) == XOR
2256 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2260 /* Convert (A | B) & A to A. */
2261 if (GET_CODE (op0) == IOR
2262 && (rtx_equal_p (XEXP (op0, 0), op1)
2263 || rtx_equal_p (XEXP (op0, 1), op1))
2264 && ! side_effects_p (XEXP (op0, 0))
2265 && ! side_effects_p (XEXP (op0, 1)))
2266 return op1;
2268 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2269 ((A & N) + B) & M -> (A + B) & M
2270 Similarly if (N & M) == 0,
2271 ((A | N) + B) & M -> (A + B) & M
2272 and for - instead of + and/or ^ instead of |. */
2273 if (GET_CODE (trueop1) == CONST_INT
2274 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2275 && ~INTVAL (trueop1)
2276 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2277 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2279 rtx pmop[2];
2280 int which;
2282 pmop[0] = XEXP (op0, 0);
2283 pmop[1] = XEXP (op0, 1);
2285 for (which = 0; which < 2; which++)
2287 tem = pmop[which];
2288 switch (GET_CODE (tem))
2290 case AND:
2291 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2292 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2293 == INTVAL (trueop1))
2294 pmop[which] = XEXP (tem, 0);
2295 break;
2296 case IOR:
2297 case XOR:
2298 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2300 pmop[which] = XEXP (tem, 0);
2301 break;
2302 default:
2303 break;
2307 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2309 tem = simplify_gen_binary (GET_CODE (op0), mode,
2310 pmop[0], pmop[1]);
2311 return simplify_gen_binary (code, mode, tem, op1);
2314 tem = simplify_associative_operation (code, mode, op0, op1);
2315 if (tem)
2316 return tem;
2317 break;
2319 case UDIV:
2320 /* 0/x is 0 (or x&0 if x has side-effects). */
2321 if (trueop0 == CONST0_RTX (mode))
2323 if (side_effects_p (op1))
2324 return simplify_gen_binary (AND, mode, op1, trueop0);
2325 return trueop0;
2327 /* x/1 is x. */
2328 if (trueop1 == CONST1_RTX (mode))
2329 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2330 /* Convert divide by power of two into shift. */
2331 if (GET_CODE (trueop1) == CONST_INT
2332 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2333 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
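/* Editorial note, not part of the original source: for an unsigned
divide, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)),
since exact_log2 (16) == 4.  */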
2334 break;
2336 case DIV:
2337 /* Handle floating point and integers separately. */
2338 if (SCALAR_FLOAT_MODE_P (mode))
2340 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2341 safe for modes with NaNs, since 0.0 / 0.0 will then be
2342 NaN rather than 0.0. Nor is it safe for modes with signed
2343 zeros, since dividing 0 by a negative number gives -0.0 */
2344 if (trueop0 == CONST0_RTX (mode)
2345 && !HONOR_NANS (mode)
2346 && !HONOR_SIGNED_ZEROS (mode)
2347 && ! side_effects_p (op1))
2348 return op0;
2349 /* x/1.0 is x. */
2350 if (trueop1 == CONST1_RTX (mode)
2351 && !HONOR_SNANS (mode))
2352 return op0;
2354 if (GET_CODE (trueop1) == CONST_DOUBLE
2355 && trueop1 != CONST0_RTX (mode))
2357 REAL_VALUE_TYPE d;
2358 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2360 /* x/-1.0 is -x. */
2361 if (REAL_VALUES_EQUAL (d, dconstm1)
2362 && !HONOR_SNANS (mode))
2363 return simplify_gen_unary (NEG, mode, op0, mode);
2365 /* Change FP division by a constant into multiplication.
2366 Only do this with -funsafe-math-optimizations. */
2367 if (flag_unsafe_math_optimizations
2368 && !REAL_VALUES_EQUAL (d, dconst0))
2370 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2371 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2372 return simplify_gen_binary (MULT, mode, op0, tem);
2376 else
2378 /* 0/x is 0 (or x&0 if x has side-effects). */
2379 if (trueop0 == CONST0_RTX (mode))
2381 if (side_effects_p (op1))
2382 return simplify_gen_binary (AND, mode, op1, trueop0);
2383 return trueop0;
2385 /* x/1 is x. */
2386 if (trueop1 == CONST1_RTX (mode))
2387 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2388 /* x/-1 is -x. */
2389 if (trueop1 == constm1_rtx)
2391 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2392 return simplify_gen_unary (NEG, mode, x, mode);
2395 break;
2397 case UMOD:
2398 /* 0%x is 0 (or x&0 if x has side-effects). */
2399 if (trueop0 == CONST0_RTX (mode))
2401 if (side_effects_p (op1))
2402 return simplify_gen_binary (AND, mode, op1, trueop0);
2403 return trueop0;
2405 /* x%1 is 0 (or x&0 if x has side-effects). */
2406 if (trueop1 == CONST1_RTX (mode))
2408 if (side_effects_p (op0))
2409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2410 return CONST0_RTX (mode);
2412 /* Implement modulus by power of two as AND. */
2413 if (GET_CODE (trueop1) == CONST_INT
2414 && exact_log2 (INTVAL (trueop1)) > 0)
2415 return simplify_gen_binary (AND, mode, op0,
2416 GEN_INT (INTVAL (op1) - 1));
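/* Editorial note, not part of the original source: for unsigned values
x % 8 == x & 7, so (umod X (const_int 8)) becomes
(and X (const_int 7)).  */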
2417 break;
2419 case MOD:
2420 /* 0%x is 0 (or x&0 if x has side-effects). */
2421 if (trueop0 == CONST0_RTX (mode))
2423 if (side_effects_p (op1))
2424 return simplify_gen_binary (AND, mode, op1, trueop0);
2425 return trueop0;
2427 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2428 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2430 if (side_effects_p (op0))
2431 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2432 return CONST0_RTX (mode);
2434 break;
2436 case ROTATERT:
2437 case ROTATE:
2438 case ASHIFTRT:
2439 if (trueop1 == CONST0_RTX (mode))
2440 return op0;
2441 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2442 return op0;
2443 /* Rotating ~0 always results in ~0. */
2444 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2445 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2446 && ! side_effects_p (op1))
2447 return op0;
2448 break;
2450 case ASHIFT:
2451 case SS_ASHIFT:
2452 if (trueop1 == CONST0_RTX (mode))
2453 return op0;
2454 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2455 return op0;
2456 break;
2458 case LSHIFTRT:
2459 if (trueop1 == CONST0_RTX (mode))
2460 return op0;
2461 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2462 return op0;
2463 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2464 if (GET_CODE (op0) == CLZ
2465 && GET_CODE (trueop1) == CONST_INT
2466 && STORE_FLAG_VALUE == 1
2467 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2469 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2470 unsigned HOST_WIDE_INT zero_val = 0;
2472 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2473 && zero_val == GET_MODE_BITSIZE (imode)
2474 && INTVAL (trueop1) == exact_log2 (zero_val))
2475 return simplify_gen_relational (EQ, mode, imode,
2476 XEXP (op0, 0), const0_rtx);
2478 break;
2480 case SMIN:
2481 if (width <= HOST_BITS_PER_WIDE_INT
2482 && GET_CODE (trueop1) == CONST_INT
2483 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2484 && ! side_effects_p (op0))
2485 return op1;
2486 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2487 return op0;
2488 tem = simplify_associative_operation (code, mode, op0, op1);
2489 if (tem)
2490 return tem;
2491 break;
2493 case SMAX:
2494 if (width <= HOST_BITS_PER_WIDE_INT
2495 && GET_CODE (trueop1) == CONST_INT
2496 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2497 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2498 && ! side_effects_p (op0))
2499 return op1;
2500 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2501 return op0;
2502 tem = simplify_associative_operation (code, mode, op0, op1);
2503 if (tem)
2504 return tem;
2505 break;
2507 case UMIN:
2508 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2509 return op1;
2510 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2511 return op0;
2512 tem = simplify_associative_operation (code, mode, op0, op1);
2513 if (tem)
2514 return tem;
2515 break;
2517 case UMAX:
2518 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2519 return op1;
2520 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2521 return op0;
2522 tem = simplify_associative_operation (code, mode, op0, op1);
2523 if (tem)
2524 return tem;
2525 break;
2527 case SS_PLUS:
2528 case US_PLUS:
2529 case SS_MINUS:
2530 case US_MINUS:
2531 /* ??? There are simplifications that can be done. */
2532 return 0;
2534 case VEC_SELECT:
2535 if (!VECTOR_MODE_P (mode))
2537 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2538 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2539 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2540 gcc_assert (XVECLEN (trueop1, 0) == 1);
2541 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2543 if (GET_CODE (trueop0) == CONST_VECTOR)
2544 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2545 (trueop1, 0, 0)));
2547 else
2549 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2550 gcc_assert (GET_MODE_INNER (mode)
2551 == GET_MODE_INNER (GET_MODE (trueop0)));
2552 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2554 if (GET_CODE (trueop0) == CONST_VECTOR)
2556 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2557 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2558 rtvec v = rtvec_alloc (n_elts);
2559 unsigned int i;
2561 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2562 for (i = 0; i < n_elts; i++)
2564 rtx x = XVECEXP (trueop1, 0, i);
2566 gcc_assert (GET_CODE (x) == CONST_INT);
2567 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2568 INTVAL (x));
2571 return gen_rtx_CONST_VECTOR (mode, v);
2575 if (XVECLEN (trueop1, 0) == 1
2576 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2577 && GET_CODE (trueop0) == VEC_CONCAT)
2579 rtx vec = trueop0;
2580 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2582 /* Try to find the element in the VEC_CONCAT. */
2583 while (GET_MODE (vec) != mode
2584 && GET_CODE (vec) == VEC_CONCAT)
2586 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2587 if (offset < vec_size)
2588 vec = XEXP (vec, 0);
2589 else
2591 offset -= vec_size;
2592 vec = XEXP (vec, 1);
2594 vec = avoid_constant_pool_reference (vec);
2597 if (GET_MODE (vec) == mode)
2598 return vec;
2601 return 0;
2602 case VEC_CONCAT:
2604 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2605 ? GET_MODE (trueop0)
2606 : GET_MODE_INNER (mode));
2607 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2608 ? GET_MODE (trueop1)
2609 : GET_MODE_INNER (mode));
2611 gcc_assert (VECTOR_MODE_P (mode));
2612 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2613 == GET_MODE_SIZE (mode));
2615 if (VECTOR_MODE_P (op0_mode))
2616 gcc_assert (GET_MODE_INNER (mode)
2617 == GET_MODE_INNER (op0_mode));
2618 else
2619 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2621 if (VECTOR_MODE_P (op1_mode))
2622 gcc_assert (GET_MODE_INNER (mode)
2623 == GET_MODE_INNER (op1_mode));
2624 else
2625 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2627 if ((GET_CODE (trueop0) == CONST_VECTOR
2628 || GET_CODE (trueop0) == CONST_INT
2629 || GET_CODE (trueop0) == CONST_DOUBLE)
2630 && (GET_CODE (trueop1) == CONST_VECTOR
2631 || GET_CODE (trueop1) == CONST_INT
2632 || GET_CODE (trueop1) == CONST_DOUBLE))
2634 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2635 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2636 rtvec v = rtvec_alloc (n_elts);
2637 unsigned int i;
2638 unsigned in_n_elts = 1;
2640 if (VECTOR_MODE_P (op0_mode))
2641 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2642 for (i = 0; i < n_elts; i++)
2644 if (i < in_n_elts)
2646 if (!VECTOR_MODE_P (op0_mode))
2647 RTVEC_ELT (v, i) = trueop0;
2648 else
2649 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2651 else
2653 if (!VECTOR_MODE_P (op1_mode))
2654 RTVEC_ELT (v, i) = trueop1;
2655 else
2656 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2657 i - in_n_elts);
2661 return gen_rtx_CONST_VECTOR (mode, v);
2664 return 0;
2666 default:
2667 gcc_unreachable ();
2670 return 0;
2674 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2675 rtx op0, rtx op1)
2677 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2678 HOST_WIDE_INT val;
2679 unsigned int width = GET_MODE_BITSIZE (mode);
2681 if (VECTOR_MODE_P (mode)
2682 && code != VEC_CONCAT
2683 && GET_CODE (op0) == CONST_VECTOR
2684 && GET_CODE (op1) == CONST_VECTOR)
2686 unsigned n_elts = GET_MODE_NUNITS (mode);
2687 enum machine_mode op0mode = GET_MODE (op0);
2688 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2689 enum machine_mode op1mode = GET_MODE (op1);
2690 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2691 rtvec v = rtvec_alloc (n_elts);
2692 unsigned int i;
2694 gcc_assert (op0_n_elts == n_elts);
2695 gcc_assert (op1_n_elts == n_elts);
2696 for (i = 0; i < n_elts; i++)
2698 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2699 CONST_VECTOR_ELT (op0, i),
2700 CONST_VECTOR_ELT (op1, i));
2701 if (!x)
2702 return 0;
2703 RTVEC_ELT (v, i) = x;
2706 return gen_rtx_CONST_VECTOR (mode, v);
2709 if (VECTOR_MODE_P (mode)
2710 && code == VEC_CONCAT
2711 && CONSTANT_P (op0) && CONSTANT_P (op1))
2713 unsigned n_elts = GET_MODE_NUNITS (mode);
2714 rtvec v = rtvec_alloc (n_elts);
2716 gcc_assert (n_elts >= 2);
2717 if (n_elts == 2)
2719 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2720 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2722 RTVEC_ELT (v, 0) = op0;
2723 RTVEC_ELT (v, 1) = op1;
2725 else
2727 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2728 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2729 unsigned i;
2731 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2732 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2733 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2735 for (i = 0; i < op0_n_elts; ++i)
2736 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2737 for (i = 0; i < op1_n_elts; ++i)
2738 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2741 return gen_rtx_CONST_VECTOR (mode, v);
2744 if (SCALAR_FLOAT_MODE_P (mode)
2745 && GET_CODE (op0) == CONST_DOUBLE
2746 && GET_CODE (op1) == CONST_DOUBLE
2747 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2749 if (code == AND
2750 || code == IOR
2751 || code == XOR)
2753 long tmp0[4];
2754 long tmp1[4];
2755 REAL_VALUE_TYPE r;
2756 int i;
2758 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2759 GET_MODE (op0));
2760 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2761 GET_MODE (op1));
2762 for (i = 0; i < 4; i++)
2764 switch (code)
2766 case AND:
2767 tmp0[i] &= tmp1[i];
2768 break;
2769 case IOR:
2770 tmp0[i] |= tmp1[i];
2771 break;
2772 case XOR:
2773 tmp0[i] ^= tmp1[i];
2774 break;
2775 default:
2776 gcc_unreachable ();
2779 real_from_target (&r, tmp0, mode);
2780 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2782 else
2784 REAL_VALUE_TYPE f0, f1, value, result;
2785 bool inexact;
2787 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2788 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2789 real_convert (&f0, mode, &f0);
2790 real_convert (&f1, mode, &f1);
2792 if (HONOR_SNANS (mode)
2793 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2794 return 0;
2796 if (code == DIV
2797 && REAL_VALUES_EQUAL (f1, dconst0)
2798 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2799 return 0;
2801 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2802 && flag_trapping_math
2803 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2805 int s0 = REAL_VALUE_NEGATIVE (f0);
2806 int s1 = REAL_VALUE_NEGATIVE (f1);
2808 switch (code)
2810 case PLUS:
2811 /* Inf + -Inf = NaN plus exception. */
2812 if (s0 != s1)
2813 return 0;
2814 break;
2815 case MINUS:
2816 /* Inf - Inf = NaN plus exception. */
2817 if (s0 == s1)
2818 return 0;
2819 break;
2820 case DIV:
2821 /* Inf / Inf = NaN plus exception. */
2822 return 0;
2823 default:
2824 break;
2828 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2829 && flag_trapping_math
2830 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2831 || (REAL_VALUE_ISINF (f1)
2832 && REAL_VALUES_EQUAL (f0, dconst0))))
2833 /* Inf * 0 = NaN plus exception. */
2834 return 0;
2836 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2837 &f0, &f1);
2838 real_convert (&result, mode, &value);
2840 /* Don't constant fold this floating point operation if
2841 the result has overflowed and flag_trapping_math is set. */
2843 if (flag_trapping_math
2844 && MODE_HAS_INFINITIES (mode)
2845 && REAL_VALUE_ISINF (result)
2846 && !REAL_VALUE_ISINF (f0)
2847 && !REAL_VALUE_ISINF (f1))
2848 /* Overflow plus exception. */
2849 return 0;
2851 /* Don't constant fold this floating point operation if the
2852 result may depend upon the run-time rounding mode and
2853 flag_rounding_math is set, or if GCC's software emulation
2854 is unable to accurately represent the result. */
2856 if ((flag_rounding_math
2857 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2858 && !flag_unsafe_math_optimizations))
2859 && (inexact || !real_identical (&result, &value)))
2860 return NULL_RTX;
2862 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2866 /* We can fold some multi-word operations. */
2867 if (GET_MODE_CLASS (mode) == MODE_INT
2868 && width == HOST_BITS_PER_WIDE_INT * 2
2869 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2870 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2872 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2873 HOST_WIDE_INT h1, h2, hv, ht;
2875 if (GET_CODE (op0) == CONST_DOUBLE)
2876 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2877 else
2878 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2880 if (GET_CODE (op1) == CONST_DOUBLE)
2881 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2882 else
2883 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2885 switch (code)
2887 case MINUS:
2888 /* A - B == A + (-B). */
2889 neg_double (l2, h2, &lv, &hv);
2890 l2 = lv, h2 = hv;
2892 /* Fall through.... */
2894 case PLUS:
2895 add_double (l1, h1, l2, h2, &lv, &hv);
2896 break;
2898 case MULT:
2899 mul_double (l1, h1, l2, h2, &lv, &hv);
2900 break;
2902 case DIV:
2903 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2904 &lv, &hv, &lt, &ht))
2905 return 0;
2906 break;
2908 case MOD:
2909 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2910 &lt, &ht, &lv, &hv))
2911 return 0;
2912 break;
2914 case UDIV:
2915 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2916 &lv, &hv, &lt, &ht))
2917 return 0;
2918 break;
2920 case UMOD:
2921 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2922 &lt, &ht, &lv, &hv))
2923 return 0;
2924 break;
2926 case AND:
2927 lv = l1 & l2, hv = h1 & h2;
2928 break;
2930 case IOR:
2931 lv = l1 | l2, hv = h1 | h2;
2932 break;
2934 case XOR:
2935 lv = l1 ^ l2, hv = h1 ^ h2;
2936 break;
2938 case SMIN:
2939 if (h1 < h2
2940 || (h1 == h2
2941 && ((unsigned HOST_WIDE_INT) l1
2942 < (unsigned HOST_WIDE_INT) l2)))
2943 lv = l1, hv = h1;
2944 else
2945 lv = l2, hv = h2;
2946 break;
2948 case SMAX:
2949 if (h1 > h2
2950 || (h1 == h2
2951 && ((unsigned HOST_WIDE_INT) l1
2952 > (unsigned HOST_WIDE_INT) l2)))
2953 lv = l1, hv = h1;
2954 else
2955 lv = l2, hv = h2;
2956 break;
2958 case UMIN:
2959 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2960 || (h1 == h2
2961 && ((unsigned HOST_WIDE_INT) l1
2962 < (unsigned HOST_WIDE_INT) l2)))
2963 lv = l1, hv = h1;
2964 else
2965 lv = l2, hv = h2;
2966 break;
2968 case UMAX:
2969 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2970 || (h1 == h2
2971 && ((unsigned HOST_WIDE_INT) l1
2972 > (unsigned HOST_WIDE_INT) l2)))
2973 lv = l1, hv = h1;
2974 else
2975 lv = l2, hv = h2;
2976 break;
2978 case LSHIFTRT: case ASHIFTRT:
2979 case ASHIFT:
2980 case ROTATE: case ROTATERT:
2981 if (SHIFT_COUNT_TRUNCATED)
2982 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2984 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2985 return 0;
2987 if (code == LSHIFTRT || code == ASHIFTRT)
2988 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2989 code == ASHIFTRT);
2990 else if (code == ASHIFT)
2991 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2992 else if (code == ROTATE)
2993 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2994 else /* code == ROTATERT */
2995 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2996 break;
2998 default:
2999 return 0;
3002 return immed_double_const (lv, hv, mode);
3005 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3006 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3008 /* Get the integer argument values in two forms:
3009 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3011 arg0 = INTVAL (op0);
3012 arg1 = INTVAL (op1);
3014 if (width < HOST_BITS_PER_WIDE_INT)
3016 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3017 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3019 arg0s = arg0;
3020 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3021 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3023 arg1s = arg1;
3024 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3025 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3027 else
3029 arg0s = arg0;
3030 arg1s = arg1;
3033 /* Compute the value of the arithmetic. */
3035 switch (code)
3037 case PLUS:
3038 val = arg0s + arg1s;
3039 break;
3041 case MINUS:
3042 val = arg0s - arg1s;
3043 break;
3045 case MULT:
3046 val = arg0s * arg1s;
3047 break;
3049 case DIV:
3050 if (arg1s == 0
3051 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3052 && arg1s == -1))
3053 return 0;
3054 val = arg0s / arg1s;
3055 break;
3057 case MOD:
3058 if (arg1s == 0
3059 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3060 && arg1s == -1))
3061 return 0;
3062 val = arg0s % arg1s;
3063 break;
3065 case UDIV:
3066 if (arg1 == 0
3067 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3068 && arg1s == -1))
3069 return 0;
3070 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3071 break;
3073 case UMOD:
3074 if (arg1 == 0
3075 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3076 && arg1s == -1))
3077 return 0;
3078 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3079 break;
3081 case AND:
3082 val = arg0 & arg1;
3083 break;
3085 case IOR:
3086 val = arg0 | arg1;
3087 break;
3089 case XOR:
3090 val = arg0 ^ arg1;
3091 break;
3093 case LSHIFTRT:
3094 case ASHIFT:
3095 case ASHIFTRT:
3096 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3097 the value is in range. We can't return any old value for
3098 out-of-range arguments because either the middle-end (via
3099 shift_truncation_mask) or the back-end might be relying on
3100 target-specific knowledge. Nor can we rely on
3101 shift_truncation_mask, since the shift might not be part of an
3102 ashlM3, lshrM3 or ashrM3 instruction. */
3103 if (SHIFT_COUNT_TRUNCATED)
3104 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3105 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3106 return 0;
3108 val = (code == ASHIFT
3109 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3110 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3112 /* Sign-extend the result for arithmetic right shifts. */
3113 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3114 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
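/* Editorial note, not part of the original source: the shift above is
performed on an unsigned copy of the value, so for ASHIFTRT of a negative
ARG0S the vacated high bits must be refilled with ones by hand; e.g. in
an 8-bit mode, -4 >> 1 must yield -2 (0xfe), not 0x7e.  */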
3115 break;
3117 case ROTATERT:
3118 if (arg1 < 0)
3119 return 0;
3121 arg1 %= width;
3122 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3123 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3124 break;
3126 case ROTATE:
3127 if (arg1 < 0)
3128 return 0;
3130 arg1 %= width;
3131 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3132 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
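/* Editorial note, not part of the original source: a left rotate by ARG1
bits is the inclusive OR of the value shifted left by ARG1 and the same
value shifted right by WIDTH - ARG1; e.g. rotating the 8-bit value 0x81
left by 1 gives 0x02 | 0x01 == 0x03.  */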
3133 break;
3135 case COMPARE:
3136 /* Do nothing here. */
3137 return 0;
3139 case SMIN:
3140 val = arg0s <= arg1s ? arg0s : arg1s;
3141 break;
3143 case UMIN:
3144 val = ((unsigned HOST_WIDE_INT) arg0
3145 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3146 break;
3148 case SMAX:
3149 val = arg0s > arg1s ? arg0s : arg1s;
3150 break;
3152 case UMAX:
3153 val = ((unsigned HOST_WIDE_INT) arg0
3154 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3155 break;
3157 case SS_PLUS:
3158 case US_PLUS:
3159 case SS_MINUS:
3160 case US_MINUS:
3161 case SS_ASHIFT:
3162 /* ??? There are simplifications that can be done. */
3163 return 0;
3165 default:
3166 gcc_unreachable ();
3169 return gen_int_mode (val, mode);
3172 return NULL_RTX;
3177 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3178 PLUS or MINUS.
3180 Rather than test for specific cases, we do this by a brute-force method
3181 and do all possible simplifications until no more changes occur. Then
3182 we rebuild the operation. */
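/* Editorial note, not part of the original source: as an example of the
method described above, (minus (plus A B) (minus C (const_int 1))) is
flattened into the operand list { +A, +B, -C, +1 }; constants and
cancelling terms are then combined pairwise, and the survivors are
rebuilt into a chain of PLUS/MINUS expressions.  */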
3184 struct simplify_plus_minus_op_data
3186 rtx op;
3187 short neg;
3188 short ix;
3191 static int
3192 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3194 const struct simplify_plus_minus_op_data *d1 = p1;
3195 const struct simplify_plus_minus_op_data *d2 = p2;
3196 int result;
3198 result = (commutative_operand_precedence (d2->op)
3199 - commutative_operand_precedence (d1->op));
3200 if (result)
3201 return result;
3202 return d1->ix - d2->ix;
3205 static rtx
3206 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3207 rtx op1)
3209 struct simplify_plus_minus_op_data ops[8];
3210 rtx result, tem;
3211 int n_ops = 2, input_ops = 2;
3212 int first, changed, canonicalized = 0;
3213 int i, j;
3215 memset (ops, 0, sizeof ops);
3217 /* Set up the two operands and then expand them until nothing has been
3218 changed. If we run out of room in our array, give up; this should
3219 almost never happen. */
3221 ops[0].op = op0;
3222 ops[0].neg = 0;
3223 ops[1].op = op1;
3224 ops[1].neg = (code == MINUS);
3228 changed = 0;
3230 for (i = 0; i < n_ops; i++)
3232 rtx this_op = ops[i].op;
3233 int this_neg = ops[i].neg;
3234 enum rtx_code this_code = GET_CODE (this_op);
3236 switch (this_code)
3238 case PLUS:
3239 case MINUS:
3240 if (n_ops == 7)
3241 return NULL_RTX;
3243 ops[n_ops].op = XEXP (this_op, 1);
3244 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3245 n_ops++;
3247 ops[i].op = XEXP (this_op, 0);
3248 input_ops++;
3249 changed = 1;
3250 canonicalized |= this_neg;
3251 break;
3253 case NEG:
3254 ops[i].op = XEXP (this_op, 0);
3255 ops[i].neg = ! this_neg;
3256 changed = 1;
3257 canonicalized = 1;
3258 break;
3260 case CONST:
3261 if (n_ops < 7
3262 && GET_CODE (XEXP (this_op, 0)) == PLUS
3263 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3264 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3266 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3267 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3268 ops[n_ops].neg = this_neg;
3269 n_ops++;
3270 changed = 1;
3271 canonicalized = 1;
3273 break;
3275 case NOT:
3276 /* ~a -> (-a - 1) */
3277 if (n_ops != 7)
3279 ops[n_ops].op = constm1_rtx;
3280 ops[n_ops++].neg = this_neg;
3281 ops[i].op = XEXP (this_op, 0);
3282 ops[i].neg = !this_neg;
3283 changed = 1;
3284 canonicalized = 1;
3286 break;
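/* Editorial note, not part of the original source: the NOT case above
relies on the two's-complement identity ~a == -a - 1, so a NOT operand
is replaced by the negation of its argument together with an extra
constant -1 term.  */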
3288 case CONST_INT:
3289 if (this_neg)
3291 ops[i].op = neg_const_int (mode, this_op);
3292 ops[i].neg = 0;
3293 changed = 1;
3294 canonicalized = 1;
3296 break;
3298 default:
3299 break;
3303 while (changed);
3305 gcc_assert (n_ops >= 2);
3306 if (!canonicalized)
3308 int n_constants = 0;
3310 for (i = 0; i < n_ops; i++)
3311 if (GET_CODE (ops[i].op) == CONST_INT)
3312 n_constants++;
3314 if (n_constants <= 1)
3315 return NULL_RTX;
3318 /* If we only have two operands, we can avoid the loops. */
3319 if (n_ops == 2)
3321 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3322 rtx lhs, rhs;
3324 /* Get the two operands. Be careful with the order, especially for
3325 the cases where code == MINUS. */
3326 if (ops[0].neg && ops[1].neg)
3328 lhs = gen_rtx_NEG (mode, ops[0].op);
3329 rhs = ops[1].op;
3331 else if (ops[0].neg)
3333 lhs = ops[1].op;
3334 rhs = ops[0].op;
3336 else
3338 lhs = ops[0].op;
3339 rhs = ops[1].op;
3342 return simplify_const_binary_operation (code, mode, lhs, rhs);
3345 /* Now simplify each pair of operands until nothing changes. The first
3346 time through just simplify constants against each other. */
3348 first = 1;
3351 changed = first;
3353 for (i = 0; i < n_ops - 1; i++)
3354 for (j = i + 1; j < n_ops; j++)
3356 rtx lhs = ops[i].op, rhs = ops[j].op;
3357 int lneg = ops[i].neg, rneg = ops[j].neg;
3359 if (lhs != 0 && rhs != 0
3360 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3362 enum rtx_code ncode = PLUS;
3364 if (lneg != rneg)
3366 ncode = MINUS;
3367 if (lneg)
3368 tem = lhs, lhs = rhs, rhs = tem;
3370 else if (swap_commutative_operands_p (lhs, rhs))
3371 tem = lhs, lhs = rhs, rhs = tem;
3373 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3374 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3376 rtx tem_lhs, tem_rhs;
3378 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3379 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3380 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3382 if (tem && !CONSTANT_P (tem))
3383 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3385 else
3386 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3388 /* Reject "simplifications" that just wrap the two
3389 arguments in a CONST. Failure to do so can result
3390 in infinite recursion with simplify_binary_operation
3391 when it calls us to simplify CONST operations. */
3392 if (tem
3393 && ! (GET_CODE (tem) == CONST
3394 && GET_CODE (XEXP (tem, 0)) == ncode
3395 && XEXP (XEXP (tem, 0), 0) == lhs
3396 && XEXP (XEXP (tem, 0), 1) == rhs)
3397 /* Don't allow -x + -1 -> ~x simplifications in the
3398 first pass. This allows us the chance to combine
3399 the -1 with other constants. */
3400 && ! (first
3401 && GET_CODE (tem) == NOT
3402 && XEXP (tem, 0) == rhs))
3404 lneg &= rneg;
3405 if (GET_CODE (tem) == NEG)
3406 tem = XEXP (tem, 0), lneg = !lneg;
3407 if (GET_CODE (tem) == CONST_INT && lneg)
3408 tem = neg_const_int (mode, tem), lneg = 0;
3410 ops[i].op = tem;
3411 ops[i].neg = lneg;
3412 ops[j].op = NULL_RTX;
3413 changed = 1;
3418 first = 0;
3420 while (changed);
3422 /* Pack all the operands to the lower-numbered entries. */
3423 for (i = 0, j = 0; j < n_ops; j++)
3424 if (ops[j].op)
3426 ops[i] = ops[j];
3427 /* Stabilize sort. */
3428 ops[i].ix = i;
3429 i++;
3431 n_ops = i;
3433 /* Sort the operations based on swap_commutative_operands_p. */
3434 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3436 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3437 if (n_ops == 2
3438 && GET_CODE (ops[1].op) == CONST_INT
3439 && CONSTANT_P (ops[0].op)
3440 && ops[0].neg)
3441 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3443 /* We suppressed creation of trivial CONST expressions in the
3444 combination loop to avoid recursion. Create one manually now.
3445 The combination loop should have ensured that there is exactly
3446 one CONST_INT, and the sort will have ensured that it is last
3447 in the array and that any other constant will be next-to-last. */
3449 if (n_ops > 1
3450 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3451 && CONSTANT_P (ops[n_ops - 2].op))
3453 rtx value = ops[n_ops - 1].op;
3454 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3455 value = neg_const_int (mode, value);
3456 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3457 n_ops--;
3460 /* Put a non-negated operand first, if possible. */
3462 for (i = 0; i < n_ops && ops[i].neg; i++)
3463 continue;
3464 if (i == n_ops)
3465 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3466 else if (i != 0)
3468 tem = ops[0].op;
3469 ops[0] = ops[i];
3470 ops[i].op = tem;
3471 ops[i].neg = 1;
3474 /* Now make the result by performing the requested operations. */
3475 result = ops[0].op;
3476 for (i = 1; i < n_ops; i++)
3477 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3478 mode, result, ops[i].op);
3480 return result;
3483 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3484 static bool
3485 plus_minus_operand_p (rtx x)
3487 return GET_CODE (x) == PLUS
3488 || GET_CODE (x) == MINUS
3489 || (GET_CODE (x) == CONST
3490 && GET_CODE (XEXP (x, 0)) == PLUS
3491 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3492 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3495 /* Like simplify_binary_operation except used for relational operators.
3496 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3497 not both be VOIDmode.
3499 CMP_MODE specifies the mode in which the comparison is done, so it is
3500 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3501 the operands or, if both are VOIDmode, the operands are compared in
3502 "infinite precision". */
3504 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3505 enum machine_mode cmp_mode, rtx op0, rtx op1)
3507 rtx tem, trueop0, trueop1;
3509 if (cmp_mode == VOIDmode)
3510 cmp_mode = GET_MODE (op0);
3511 if (cmp_mode == VOIDmode)
3512 cmp_mode = GET_MODE (op1);
3514 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3515 if (tem)
3517 if (SCALAR_FLOAT_MODE_P (mode))
3519 if (tem == const0_rtx)
3520 return CONST0_RTX (mode);
3521 #ifdef FLOAT_STORE_FLAG_VALUE
3523 REAL_VALUE_TYPE val;
3524 val = FLOAT_STORE_FLAG_VALUE (mode);
3525 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3527 #else
3528 return NULL_RTX;
3529 #endif
3531 if (VECTOR_MODE_P (mode))
3533 if (tem == const0_rtx)
3534 return CONST0_RTX (mode);
3535 #ifdef VECTOR_STORE_FLAG_VALUE
3537 int i, units;
3538 rtvec v;
3540 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3541 if (val == NULL_RTX)
3542 return NULL_RTX;
3543 if (val == const1_rtx)
3544 return CONST1_RTX (mode);
3546 units = GET_MODE_NUNITS (mode);
3547 v = rtvec_alloc (units);
3548 for (i = 0; i < units; i++)
3549 RTVEC_ELT (v, i) = val;
3550 return gen_rtx_raw_CONST_VECTOR (mode, v);
3552 #else
3553 return NULL_RTX;
3554 #endif
3557 return tem;
3560 /* For the following tests, ensure const0_rtx is op1. */
3561 if (swap_commutative_operands_p (op0, op1)
3562 || (op0 == const0_rtx && op1 != const0_rtx))
3563 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3565 /* If op0 is a compare, extract the comparison arguments from it. */
3566 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3567 return simplify_relational_operation (code, mode, VOIDmode,
3568 XEXP (op0, 0), XEXP (op0, 1));
3570 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3571 || CC0_P (op0))
3572 return NULL_RTX;
3574 trueop0 = avoid_constant_pool_reference (op0);
3575 trueop1 = avoid_constant_pool_reference (op1);
3576 return simplify_relational_operation_1 (code, mode, cmp_mode,
3577 trueop0, trueop1);
3580 /* This part of simplify_relational_operation is only used when CMP_MODE
3581 is not in class MODE_CC (i.e. it is a real comparison).
3583 MODE is the mode of the result, while CMP_MODE specifies the mode in
3584 which the comparison is done, so it is the mode of the operands. */
3586 static rtx
3587 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3588 enum machine_mode cmp_mode, rtx op0, rtx op1)
3590 enum rtx_code op0code = GET_CODE (op0);
3592 if (GET_CODE (op1) == CONST_INT)
3594 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3596 /* If op0 is a comparison, extract the comparison arguments
3597 from it. */
3598 if (code == NE)
3600 if (GET_MODE (op0) == mode)
3601 return simplify_rtx (op0);
3602 else
3603 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3604 XEXP (op0, 0), XEXP (op0, 1));
3606 else if (code == EQ)
3608 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3609 if (new_code != UNKNOWN)
3610 return simplify_gen_relational (new_code, mode, VOIDmode,
3611 XEXP (op0, 0), XEXP (op0, 1));
3616 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3617 if ((code == EQ || code == NE)
3618 && (op0code == PLUS || op0code == MINUS)
3619 && CONSTANT_P (op1)
3620 && CONSTANT_P (XEXP (op0, 1))
3621 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3623 rtx x = XEXP (op0, 0);
3624 rtx c = XEXP (op0, 1);
3626 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3627 cmp_mode, op1, c);
3628 return simplify_gen_relational (code, mode, cmp_mode, x, c);
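/* Editorial note, not part of the original source: e.g.
(eq (plus X (const_int 3)) (const_int 10)) becomes
(eq X (const_int 7)), since op0code == PLUS selects MINUS and
10 - 3 == 7.  */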
3631 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3632 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3633 if (code == NE
3634 && op1 == const0_rtx
3635 && GET_MODE_CLASS (mode) == MODE_INT
3636 && cmp_mode != VOIDmode
3637 /* ??? Work-around BImode bugs in the ia64 backend. */
3638 && mode != BImode
3639 && cmp_mode != BImode
3640 && nonzero_bits (op0, cmp_mode) == 1
3641 && STORE_FLAG_VALUE == 1)
3642 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3643 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3644 : lowpart_subreg (mode, op0, cmp_mode);
3646 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3647 if ((code == EQ || code == NE)
3648 && op1 == const0_rtx
3649 && op0code == XOR)
3650 return simplify_gen_relational (code, mode, cmp_mode,
3651 XEXP (op0, 0), XEXP (op0, 1));
3653 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3654 if ((code == EQ || code == NE)
3655 && op0code == XOR
3656 && rtx_equal_p (XEXP (op0, 0), op1)
3657 && !side_effects_p (XEXP (op0, 0)))
3658 return simplify_gen_relational (code, mode, cmp_mode,
3659 XEXP (op0, 1), const0_rtx);
3661 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3662 if ((code == EQ || code == NE)
3663 && op0code == XOR
3664 && rtx_equal_p (XEXP (op0, 1), op1)
3665 && !side_effects_p (XEXP (op0, 1)))
3666 return simplify_gen_relational (code, mode, cmp_mode,
3667 XEXP (op0, 0), const0_rtx);
3669 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3670 if ((code == EQ || code == NE)
3671 && op0code == XOR
3672 && (GET_CODE (op1) == CONST_INT
3673 || GET_CODE (op1) == CONST_DOUBLE)
3674 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3675 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3676 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3677 simplify_gen_binary (XOR, cmp_mode,
3678 XEXP (op0, 1), op1));
3680 return NULL_RTX;
3683 /* Check if the given comparison (done in the given MODE) is actually a
3684 tautology or a contradiction.
3685 If no simplification is possible, this function returns zero.
3686 Otherwise, it returns either const_true_rtx or const0_rtx. */
3689 simplify_const_relational_operation (enum rtx_code code,
3690 enum machine_mode mode,
3691 rtx op0, rtx op1)
3693 int equal, op0lt, op0ltu, op1lt, op1ltu;
3694 rtx tem;
3695 rtx trueop0;
3696 rtx trueop1;
3698 gcc_assert (mode != VOIDmode
3699 || (GET_MODE (op0) == VOIDmode
3700 && GET_MODE (op1) == VOIDmode));
3702 /* If op0 is a compare, extract the comparison arguments from it. */
3703 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3705 op1 = XEXP (op0, 1);
3706 op0 = XEXP (op0, 0);
3708 if (GET_MODE (op0) != VOIDmode)
3709 mode = GET_MODE (op0);
3710 else if (GET_MODE (op1) != VOIDmode)
3711 mode = GET_MODE (op1);
3712 else
3713 return 0;
3716 /* We can't simplify MODE_CC values since we don't know what the
3717 actual comparison is. */
3718 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3719 return 0;
3721 /* Make sure the constant is second. */
3722 if (swap_commutative_operands_p (op0, op1))
3724 tem = op0, op0 = op1, op1 = tem;
3725 code = swap_condition (code);
3728 trueop0 = avoid_constant_pool_reference (op0);
3729 trueop1 = avoid_constant_pool_reference (op1);
3731 /* For integer comparisons of A and B maybe we can simplify A - B and can
3732 then simplify a comparison of that with zero. If A and B are both either
3733 a register or a CONST_INT, this can't help; testing for these cases will
3734 prevent infinite recursion here and speed things up.
3736 If CODE is an unsigned comparison, then we can never do this optimization,
3737 because it gives an incorrect result if the subtraction wraps around zero.
3738 ANSI C defines unsigned operations such that they never overflow, and
3739 thus such cases can not be ignored; but we cannot do it even for
3740 signed comparisons for languages such as Java, so test flag_wrapv. */
3742 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3743 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3744 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3745 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3746 /* We cannot do this for == or != if tem is a nonzero address. */
3747 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3748 && code != GTU && code != GEU && code != LTU && code != LEU)
3749 return simplify_const_relational_operation (signed_condition (code),
3750 mode, tem, const0_rtx);
3752 if (flag_unsafe_math_optimizations && code == ORDERED)
3753 return const_true_rtx;
3755 if (flag_unsafe_math_optimizations && code == UNORDERED)
3756 return const0_rtx;
3758 /* For modes without NaNs, if the two operands are equal, we know the
3759 result except if they have side-effects. */
3760 if (! HONOR_NANS (GET_MODE (trueop0))
3761 && rtx_equal_p (trueop0, trueop1)
3762 && ! side_effects_p (trueop0))
3763 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3765 /* If the operands are floating-point constants, see if we can fold
3766 the result. */
3767 else if (GET_CODE (trueop0) == CONST_DOUBLE
3768 && GET_CODE (trueop1) == CONST_DOUBLE
3769 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3771 REAL_VALUE_TYPE d0, d1;
3773 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3774 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3776 /* Comparisons are unordered iff at least one of the values is NaN. */
3777 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3778 switch (code)
3780 case UNEQ:
3781 case UNLT:
3782 case UNGT:
3783 case UNLE:
3784 case UNGE:
3785 case NE:
3786 case UNORDERED:
3787 return const_true_rtx;
3788 case EQ:
3789 case LT:
3790 case GT:
3791 case LE:
3792 case GE:
3793 case LTGT:
3794 case ORDERED:
3795 return const0_rtx;
3796 default:
3797 return 0;
3800 equal = REAL_VALUES_EQUAL (d0, d1);
3801 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3802 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3805 /* Otherwise, see if the operands are both integers. */
3806 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3807 && (GET_CODE (trueop0) == CONST_DOUBLE
3808 || GET_CODE (trueop0) == CONST_INT)
3809 && (GET_CODE (trueop1) == CONST_DOUBLE
3810 || GET_CODE (trueop1) == CONST_INT))
3812 int width = GET_MODE_BITSIZE (mode);
3813 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3814 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3816 /* Get the two words comprising each integer constant. */
3817 if (GET_CODE (trueop0) == CONST_DOUBLE)
3819 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3820 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3822 else
3824 l0u = l0s = INTVAL (trueop0);
3825 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3828 if (GET_CODE (trueop1) == CONST_DOUBLE)
3830 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3831 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3833 else
3835 l1u = l1s = INTVAL (trueop1);
3836 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3839 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3840 we have to sign or zero-extend the values. */
3841 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3843 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3844 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3846 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3847 l0s |= ((HOST_WIDE_INT) (-1) << width);
3849 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3850 l1s |= ((HOST_WIDE_INT) (-1) << width);
3852 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3853 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3855 equal = (h0u == h1u && l0u == l1u);
3856 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3857 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3858 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3859 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3862 /* Otherwise, there are some code-specific tests we can make. */
3863 else
3865 /* Optimize comparisons with upper and lower bounds. */
3866 if (SCALAR_INT_MODE_P (mode)
3867 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3869 rtx mmin, mmax;
3870 int sign;
3872 if (code == GEU
3873 || code == LEU
3874 || code == GTU
3875 || code == LTU)
3876 sign = 0;
3877 else
3878 sign = 1;
3880 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3882 tem = NULL_RTX;
3883 switch (code)
3885 case GEU:
3886 case GE:
3887 /* x >= min is always true. */
3888 if (rtx_equal_p (trueop1, mmin))
3889 tem = const_true_rtx;
3890 else
3891 break;
3893 case LEU:
3894 case LE:
3895 /* x <= max is always true. */
3896 if (rtx_equal_p (trueop1, mmax))
3897 tem = const_true_rtx;
3898 break;
3900 case GTU:
3901 case GT:
3902 /* x > max is always false. */
3903 if (rtx_equal_p (trueop1, mmax))
3904 tem = const0_rtx;
3905 break;
3907 case LTU:
3908 case LT:
3909 /* x < min is always false. */
3910 if (rtx_equal_p (trueop1, mmin))
3911 tem = const0_rtx;
3912 break;
3914 default:
3915 break;
3917 if (tem == const0_rtx
3918 || tem == const_true_rtx)
3919 return tem;
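/* For instance, for an SImode operand X that is not a constant,
   (geu:SI X (const_int 0)) folds to const_true_rtx here because zero is
   the minimum unsigned value, and (gtu:SI X (const_int -1)), i.e. a
   comparison against the unsigned maximum, folds to const0_rtx. */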
3922 switch (code)
3924 case EQ:
3925 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3926 return const0_rtx;
3927 break;
3929 case NE:
3930 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3931 return const_true_rtx;
3932 break;
3934 case LT:
3935 /* Optimize abs(x) < 0.0. */
3936 if (trueop1 == CONST0_RTX (mode)
3937 && !HONOR_SNANS (mode)
3938 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3940 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3941 : trueop0;
3942 if (GET_CODE (tem) == ABS)
3943 return const0_rtx;
3945 break;
3947 case GE:
3948 /* Optimize abs(x) >= 0.0. */
3949 if (trueop1 == CONST0_RTX (mode)
3950 && !HONOR_NANS (mode)
3951 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3953 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3954 : trueop0;
3955 if (GET_CODE (tem) == ABS)
3956 return const_true_rtx;
3958 break;
3960 case UNGE:
3961 /* Optimize ! (abs(x) < 0.0). */
3962 if (trueop1 == CONST0_RTX (mode))
3964 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3965 : trueop0;
3966 if (GET_CODE (tem) == ABS)
3967 return const_true_rtx;
3969 break;
3971 default:
3972 break;
3975 return 0;
3978 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3979 as appropriate. */
3980 switch (code)
3982 case EQ:
3983 case UNEQ:
3984 return equal ? const_true_rtx : const0_rtx;
3985 case NE:
3986 case LTGT:
3987 return ! equal ? const_true_rtx : const0_rtx;
3988 case LT:
3989 case UNLT:
3990 return op0lt ? const_true_rtx : const0_rtx;
3991 case GT:
3992 case UNGT:
3993 return op1lt ? const_true_rtx : const0_rtx;
3994 case LTU:
3995 return op0ltu ? const_true_rtx : const0_rtx;
3996 case GTU:
3997 return op1ltu ? const_true_rtx : const0_rtx;
3998 case LE:
3999 case UNLE:
4000 return equal || op0lt ? const_true_rtx : const0_rtx;
4001 case GE:
4002 case UNGE:
4003 return equal || op1lt ? const_true_rtx : const0_rtx;
4004 case LEU:
4005 return equal || op0ltu ? const_true_rtx : const0_rtx;
4006 case GEU:
4007 return equal || op1ltu ? const_true_rtx : const0_rtx;
4008 case ORDERED:
4009 return const_true_rtx;
4010 case UNORDERED:
4011 return const0_rtx;
4012 default:
4013 gcc_unreachable ();
4017 /* Simplify CODE, an operation with result mode MODE and three operands,
4018 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4019 a constant. Return 0 if no simplification is possible. */
4022 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4023 enum machine_mode op0_mode, rtx op0, rtx op1,
4024 rtx op2)
4026 unsigned int width = GET_MODE_BITSIZE (mode);
4028 /* VOIDmode means "infinite" precision. */
4029 if (width == 0)
4030 width = HOST_BITS_PER_WIDE_INT;
4032 switch (code)
4034 case SIGN_EXTRACT:
4035 case ZERO_EXTRACT:
4036 if (GET_CODE (op0) == CONST_INT
4037 && GET_CODE (op1) == CONST_INT
4038 && GET_CODE (op2) == CONST_INT
4039 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4040 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4042 /* Extracting a bit-field from a constant. */
4043 HOST_WIDE_INT val = INTVAL (op0);
4045 if (BITS_BIG_ENDIAN)
4046 val >>= (GET_MODE_BITSIZE (op0_mode)
4047 - INTVAL (op2) - INTVAL (op1));
4048 else
4049 val >>= INTVAL (op2);
4051 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4053 /* First zero-extend. */
4054 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4055 /* If desired, propagate sign bit. */
4056 if (code == SIGN_EXTRACT
4057 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4058 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4061 /* Clear the bits that don't belong in our mode,
4062 unless they and our sign bit are all one.
4063 So we get either a reasonable negative value or a reasonable
4064 unsigned value for this mode. */
4065 if (width < HOST_BITS_PER_WIDE_INT
4066 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4067 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4068 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4070 return gen_int_mode (val, mode);
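/* A small worked example, assuming !BITS_BIG_ENDIAN: a ZERO_EXTRACT of
   4 bits starting at bit 4 of (const_int 0xab) shifts the value right
   by 4 and masks it to 4 bits, yielding (const_int 0xa); SIGN_EXTRACT
   would additionally propagate bit 3 of the field, giving (const_int -6). */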
4072 break;
4074 case IF_THEN_ELSE:
4075 if (GET_CODE (op0) == CONST_INT)
4076 return op0 != const0_rtx ? op1 : op2;
4078 /* Convert c ? a : a into "a". */
4079 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4080 return op1;
4082 /* Convert a != b ? a : b into "a". */
4083 if (GET_CODE (op0) == NE
4084 && ! side_effects_p (op0)
4085 && ! HONOR_NANS (mode)
4086 && ! HONOR_SIGNED_ZEROS (mode)
4087 && ((rtx_equal_p (XEXP (op0, 0), op1)
4088 && rtx_equal_p (XEXP (op0, 1), op2))
4089 || (rtx_equal_p (XEXP (op0, 0), op2)
4090 && rtx_equal_p (XEXP (op0, 1), op1))))
4091 return op1;
4093 /* Convert a == b ? a : b into "b". */
4094 if (GET_CODE (op0) == EQ
4095 && ! side_effects_p (op0)
4096 && ! HONOR_NANS (mode)
4097 && ! HONOR_SIGNED_ZEROS (mode)
4098 && ((rtx_equal_p (XEXP (op0, 0), op1)
4099 && rtx_equal_p (XEXP (op0, 1), op2))
4100 || (rtx_equal_p (XEXP (op0, 0), op2)
4101 && rtx_equal_p (XEXP (op0, 1), op1))))
4102 return op2;
4104 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4106 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4107 ? GET_MODE (XEXP (op0, 1))
4108 : GET_MODE (XEXP (op0, 0)));
4109 rtx temp;
4111 /* See whether OP1 and OP2 are constants that let the IF_THEN_ELSE collapse into a plain comparison. */
4112 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4114 HOST_WIDE_INT t = INTVAL (op1);
4115 HOST_WIDE_INT f = INTVAL (op2);
4117 if (t == STORE_FLAG_VALUE && f == 0)
4118 code = GET_CODE (op0);
4119 else if (t == 0 && f == STORE_FLAG_VALUE)
4121 enum rtx_code tmp;
4122 tmp = reversed_comparison_code (op0, NULL_RTX);
4123 if (tmp == UNKNOWN)
4124 break;
4125 code = tmp;
4127 else
4128 break;
4130 return simplify_gen_relational (code, mode, cmp_mode,
4131 XEXP (op0, 0), XEXP (op0, 1));
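/* As a sketch of the conversion above, on a target where
   STORE_FLAG_VALUE is 1, (if_then_else (lt:SI A B) (const_int 1)
   (const_int 0)) becomes simply (lt:SI A B), and with the constant
   arms swapped the reversed comparison (ge:SI A B) is used instead. */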
4134 if (cmp_mode == VOIDmode)
4135 cmp_mode = op0_mode;
4136 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4137 cmp_mode, XEXP (op0, 0),
4138 XEXP (op0, 1));
4140 /* See if any simplifications were possible. */
4141 if (temp)
4143 if (GET_CODE (temp) == CONST_INT)
4144 return temp == const0_rtx ? op2 : op1;
4145 else if (temp)
4146 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4149 break;
4151 case VEC_MERGE:
4152 gcc_assert (GET_MODE (op0) == mode);
4153 gcc_assert (GET_MODE (op1) == mode);
4154 gcc_assert (VECTOR_MODE_P (mode));
4155 op2 = avoid_constant_pool_reference (op2);
4156 if (GET_CODE (op2) == CONST_INT)
4158 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4159 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4160 int mask = (1 << n_elts) - 1;
4162 if (!(INTVAL (op2) & mask))
4163 return op1;
4164 if ((INTVAL (op2) & mask) == mask)
4165 return op0;
4167 op0 = avoid_constant_pool_reference (op0);
4168 op1 = avoid_constant_pool_reference (op1);
4169 if (GET_CODE (op0) == CONST_VECTOR
4170 && GET_CODE (op1) == CONST_VECTOR)
4172 rtvec v = rtvec_alloc (n_elts);
4173 unsigned int i;
4175 for (i = 0; i < n_elts; i++)
4176 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4177 ? CONST_VECTOR_ELT (op0, i)
4178 : CONST_VECTOR_ELT (op1, i));
4179 return gen_rtx_CONST_VECTOR (mode, v);
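/* For example, with V4SImode constant vectors A and B,
   (vec_merge A B (const_int 5)) is folded here to a new CONST_VECTOR
   that takes elements 0 and 2 from A and elements 1 and 3 from B,
   since bits 0 and 2 of the mask are set. */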
4182 break;
4184 default:
4185 gcc_unreachable ();
4188 return 0;
4191 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4192 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4194 Works by unpacking OP into a collection of 8-bit values
4195 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4196 and then repacking them again for OUTERMODE. */
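/* A minimal sketch of the unpack/repack scheme, assuming a little-endian
   target: for (subreg:QI (const_int 0x1234) 0) with HImode as INNERMODE,
   the constant is unpacked into the byte array {0x34, 0x12, ...}, byte 0
   is selected, and the result is repacked as (const_int 0x34). */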
4198 static rtx
4199 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4200 enum machine_mode innermode, unsigned int byte)
4202 /* We support up to 512-bit values (for V8DFmode). */
4203 enum {
4204 max_bitsize = 512,
4205 value_bit = 8,
4206 value_mask = (1 << value_bit) - 1
4208 unsigned char value[max_bitsize / value_bit];
4209 int value_start;
4210 int i;
4211 int elem;
4213 int num_elem;
4214 rtx * elems;
4215 int elem_bitsize;
4216 rtx result_s;
4217 rtvec result_v = NULL;
4218 enum mode_class outer_class;
4219 enum machine_mode outer_submode;
4221 /* Some ports misuse CCmode. */
4222 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4223 return op;
4225 /* We have no way to represent a complex constant at the rtl level. */
4226 if (COMPLEX_MODE_P (outermode))
4227 return NULL_RTX;
4229 /* Unpack the value. */
4231 if (GET_CODE (op) == CONST_VECTOR)
4233 num_elem = CONST_VECTOR_NUNITS (op);
4234 elems = &CONST_VECTOR_ELT (op, 0);
4235 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4237 else
4239 num_elem = 1;
4240 elems = &op;
4241 elem_bitsize = max_bitsize;
4243 /* If this asserts, it is too complicated; reducing value_bit may help. */
4244 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4245 /* I don't know how to handle endianness of sub-units. */
4246 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4248 for (elem = 0; elem < num_elem; elem++)
4250 unsigned char * vp;
4251 rtx el = elems[elem];
4253 /* Vectors are kept in target memory order. (This is probably
4254 a mistake.) */
4256 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4257 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4258 / BITS_PER_UNIT);
4259 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4260 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4261 unsigned bytele = (subword_byte % UNITS_PER_WORD
4262 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4263 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4266 switch (GET_CODE (el))
4268 case CONST_INT:
4269 for (i = 0;
4270 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4271 i += value_bit)
4272 *vp++ = INTVAL (el) >> i;
4273 /* CONST_INTs are always logically sign-extended. */
4274 for (; i < elem_bitsize; i += value_bit)
4275 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4276 break;
4278 case CONST_DOUBLE:
4279 if (GET_MODE (el) == VOIDmode)
4281 /* If this triggers, someone should have generated a
4282 CONST_INT instead. */
4283 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4285 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4286 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4287 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4289 *vp++
4290 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4291 i += value_bit;
4293 /* It shouldn't matter what's done here, so fill it with
4294 zero. */
4295 for (; i < elem_bitsize; i += value_bit)
4296 *vp++ = 0;
4298 else
4300 long tmp[max_bitsize / 32];
4301 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4303 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4304 gcc_assert (bitsize <= elem_bitsize);
4305 gcc_assert (bitsize % value_bit == 0);
4307 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4308 GET_MODE (el));
4310 /* real_to_target produces its result in words affected by
4311 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4312 and use WORDS_BIG_ENDIAN instead; see the documentation
4313 of SUBREG in rtl.texi. */
4314 for (i = 0; i < bitsize; i += value_bit)
4316 int ibase;
4317 if (WORDS_BIG_ENDIAN)
4318 ibase = bitsize - 1 - i;
4319 else
4320 ibase = i;
4321 *vp++ = tmp[ibase / 32] >> i % 32;
4324 /* It shouldn't matter what's done here, so fill it with
4325 zero. */
4326 for (; i < elem_bitsize; i += value_bit)
4327 *vp++ = 0;
4329 break;
4331 default:
4332 gcc_unreachable ();
4336 /* Now, pick the right byte to start with. */
4337 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4338 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4339 will already have offset 0. */
4340 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4342 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4343 - byte);
4344 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4345 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4346 byte = (subword_byte % UNITS_PER_WORD
4347 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4350 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4351 so if it's become negative it will instead be very large.) */
4352 gcc_assert (byte < GET_MODE_SIZE (innermode));
4354 /* Convert from bytes to chunks of size value_bit. */
4355 value_start = byte * (BITS_PER_UNIT / value_bit);
4357 /* Re-pack the value. */
4359 if (VECTOR_MODE_P (outermode))
4361 num_elem = GET_MODE_NUNITS (outermode);
4362 result_v = rtvec_alloc (num_elem);
4363 elems = &RTVEC_ELT (result_v, 0);
4364 outer_submode = GET_MODE_INNER (outermode);
4366 else
4368 num_elem = 1;
4369 elems = &result_s;
4370 outer_submode = outermode;
4373 outer_class = GET_MODE_CLASS (outer_submode);
4374 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4376 gcc_assert (elem_bitsize % value_bit == 0);
4377 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4379 for (elem = 0; elem < num_elem; elem++)
4381 unsigned char *vp;
4383 /* Vectors are stored in target memory order. (This is probably
4384 a mistake.) */
4386 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4387 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4388 / BITS_PER_UNIT);
4389 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4390 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4391 unsigned bytele = (subword_byte % UNITS_PER_WORD
4392 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4393 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4396 switch (outer_class)
4398 case MODE_INT:
4399 case MODE_PARTIAL_INT:
4401 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4403 for (i = 0;
4404 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4405 i += value_bit)
4406 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4407 for (; i < elem_bitsize; i += value_bit)
4408 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4409 << (i - HOST_BITS_PER_WIDE_INT));
4411 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4412 know why. */
4413 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4414 elems[elem] = gen_int_mode (lo, outer_submode);
4415 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4416 elems[elem] = immed_double_const (lo, hi, outer_submode);
4417 else
4418 return NULL_RTX;
4420 break;
4422 case MODE_FLOAT:
4423 case MODE_DECIMAL_FLOAT:
4425 REAL_VALUE_TYPE r;
4426 long tmp[max_bitsize / 32];
4428 /* real_from_target wants its input in words affected by
4429 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4430 and use WORDS_BIG_ENDIAN instead; see the documentation
4431 of SUBREG in rtl.texi. */
4432 for (i = 0; i < max_bitsize / 32; i++)
4433 tmp[i] = 0;
4434 for (i = 0; i < elem_bitsize; i += value_bit)
4436 int ibase;
4437 if (WORDS_BIG_ENDIAN)
4438 ibase = elem_bitsize - 1 - i;
4439 else
4440 ibase = i;
4441 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4444 real_from_target (&r, tmp, outer_submode);
4445 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4447 break;
4449 default:
4450 gcc_unreachable ();
4453 if (VECTOR_MODE_P (outermode))
4454 return gen_rtx_CONST_VECTOR (outermode, result_v);
4455 else
4456 return result_s;
4459 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4460 Return 0 if no simplifications are possible. */
4462 simplify_subreg (enum machine_mode outermode, rtx op,
4463 enum machine_mode innermode, unsigned int byte)
4465 /* Little bit of sanity checking. */
4466 gcc_assert (innermode != VOIDmode);
4467 gcc_assert (outermode != VOIDmode);
4468 gcc_assert (innermode != BLKmode);
4469 gcc_assert (outermode != BLKmode);
4471 gcc_assert (GET_MODE (op) == innermode
4472 || GET_MODE (op) == VOIDmode);
4474 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4475 gcc_assert (byte < GET_MODE_SIZE (innermode));
4477 if (outermode == innermode && !byte)
4478 return op;
4480 if (GET_CODE (op) == CONST_INT
4481 || GET_CODE (op) == CONST_DOUBLE
4482 || GET_CODE (op) == CONST_VECTOR)
4483 return simplify_immed_subreg (outermode, op, innermode, byte);
4485 /* Changing mode twice with SUBREG => just change it once,
4486 or not at all if changing back to the starting mode. */
4487 if (GET_CODE (op) == SUBREG)
4489 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4490 int final_offset = byte + SUBREG_BYTE (op);
4491 rtx newx;
4493 if (outermode == innermostmode
4494 && byte == 0 && SUBREG_BYTE (op) == 0)
4495 return SUBREG_REG (op);
4497 /* The SUBREG_BYTE represents the offset, as if the value were stored
4498 in memory. The irritating exception is the paradoxical subreg, where
4499 we define SUBREG_BYTE to be 0; on big-endian machines the value
4500 would otherwise be negative. For a moment, undo this exception. */
4501 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4503 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4504 if (WORDS_BIG_ENDIAN)
4505 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4506 if (BYTES_BIG_ENDIAN)
4507 final_offset += difference % UNITS_PER_WORD;
4509 if (SUBREG_BYTE (op) == 0
4510 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4512 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4513 if (WORDS_BIG_ENDIAN)
4514 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4515 if (BYTES_BIG_ENDIAN)
4516 final_offset += difference % UNITS_PER_WORD;
4519 /* See whether resulting subreg will be paradoxical. */
4520 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4522 /* In nonparadoxical subregs we can't handle negative offsets. */
4523 if (final_offset < 0)
4524 return NULL_RTX;
4525 /* Bail out in case resulting subreg would be incorrect. */
4526 if (final_offset % GET_MODE_SIZE (outermode)
4527 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4528 return NULL_RTX;
4530 else
4532 int offset = 0;
4533 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4535 /* For a paradoxical subreg, see if we are still looking at the lower
4536 part. If so, our SUBREG_BYTE will be 0. */
4537 if (WORDS_BIG_ENDIAN)
4538 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4539 if (BYTES_BIG_ENDIAN)
4540 offset += difference % UNITS_PER_WORD;
4541 if (offset == final_offset)
4542 final_offset = 0;
4543 else
4544 return NULL_RTX;
4547 /* Recurse for further possible simplifications. */
4548 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4549 final_offset);
4550 if (newx)
4551 return newx;
4552 if (validate_subreg (outermode, innermostmode,
4553 SUBREG_REG (op), final_offset))
4554 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4555 return NULL_RTX;
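/* Two illustrative cases of the rewrite above, assuming little-endian
   byte numbering: (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes
   (subreg:QI (reg:SI x) 0), and the paradoxical
   (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to (reg:SI x). */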
4558 /* Merge implicit and explicit truncations. */
4560 if (GET_CODE (op) == TRUNCATE
4561 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4562 && subreg_lowpart_offset (outermode, innermode) == byte)
4563 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4564 GET_MODE (XEXP (op, 0)));
4566 /* SUBREG of a hard register => just change the register number
4567 and/or mode. If the hard register is not valid in that mode,
4568 suppress this simplification. If the hard register is the stack,
4569 frame, or argument pointer, leave this as a SUBREG. */
4571 if (REG_P (op)
4572 && REGNO (op) < FIRST_PSEUDO_REGISTER
4573 #ifdef CANNOT_CHANGE_MODE_CLASS
4574 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4575 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4576 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4577 #endif
4578 && ((reload_completed && !frame_pointer_needed)
4579 || (REGNO (op) != FRAME_POINTER_REGNUM
4580 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4581 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4582 #endif
4584 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4585 && REGNO (op) != ARG_POINTER_REGNUM
4586 #endif
4587 && REGNO (op) != STACK_POINTER_REGNUM
4588 && subreg_offset_representable_p (REGNO (op), innermode,
4589 byte, outermode))
4591 unsigned int regno = REGNO (op);
4592 unsigned int final_regno
4593 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4595 /* ??? We do allow it if the current REG is not valid for
4596 its mode. This is a kludge to work around how float/complex
4597 arguments are passed on 32-bit SPARC and should be fixed. */
4598 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4599 || ! HARD_REGNO_MODE_OK (regno, innermode))
4601 rtx x;
4602 int final_offset = byte;
4604 /* Adjust offset for paradoxical subregs. */
4605 if (byte == 0
4606 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4608 int difference = (GET_MODE_SIZE (innermode)
4609 - GET_MODE_SIZE (outermode));
4610 if (WORDS_BIG_ENDIAN)
4611 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4612 if (BYTES_BIG_ENDIAN)
4613 final_offset += difference % UNITS_PER_WORD;
4616 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4618 /* Propagate the original regno. We don't have any way to specify
4619 the offset inside the original regno, so do so only for the lowpart.
4620 The information is used only by alias analysis, which cannot
4621 grok a partial register anyway. */
4623 if (subreg_lowpart_offset (outermode, innermode) == byte)
4624 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4625 return x;
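/* Sketch of the hard-register case, assuming a hypothetical 32-bit
   little-endian target where hard registers 10 and 11 together hold a
   DImode value: (subreg:SI (reg:DI 10) 4) can be rewritten outright as
   (reg:SI 11), provided SImode is valid for register 11. */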
4629 /* If we have a SUBREG of a register that we are replacing and we are
4630 replacing it with a MEM, make a new MEM and try replacing the
4631 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4632 or if we would be widening it. */
4634 if (MEM_P (op)
4635 && ! mode_dependent_address_p (XEXP (op, 0))
4636 /* Allow splitting of volatile memory references in case we don't
4637 have an instruction to move the whole thing. */
4638 && (! MEM_VOLATILE_P (op)
4639 || ! have_insn_for (SET, innermode))
4640 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4641 return adjust_address_nv (op, outermode, byte);
4643 /* Handle complex values represented as CONCAT
4644 of real and imaginary part. */
4645 if (GET_CODE (op) == CONCAT)
4647 unsigned int inner_size, final_offset;
4648 rtx part, res;
4650 inner_size = GET_MODE_UNIT_SIZE (innermode);
4651 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4652 final_offset = byte % inner_size;
4653 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4654 return NULL_RTX;
4656 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4657 if (res)
4658 return res;
4659 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4660 return gen_rtx_SUBREG (outermode, part, final_offset);
4661 return NULL_RTX;
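/* For instance, on a target where SFmode is 4 bytes wide,
   (subreg:SF (concat:SC R I) 0) selects the real part R and
   (subreg:SF (concat:SC R I) 4) selects the imaginary part I. */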
4664 /* Optimize SUBREG truncations of zero and sign extended values. */
4665 if ((GET_CODE (op) == ZERO_EXTEND
4666 || GET_CODE (op) == SIGN_EXTEND)
4667 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4669 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4671 /* If we're requesting the lowpart of a zero or sign extension,
4672 there are three possibilities. If the outermode is the same
4673 as the origmode, we can omit both the extension and the subreg.
4674 If the outermode is not larger than the origmode, we can apply
4675 the truncation without the extension. Finally, if the outermode
4676 is larger than the origmode, but both are integer modes, we
4677 can just extend to the appropriate mode. */
4678 if (bitpos == 0)
4680 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4681 if (outermode == origmode)
4682 return XEXP (op, 0);
4683 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4684 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4685 subreg_lowpart_offset (outermode,
4686 origmode));
4687 if (SCALAR_INT_MODE_P (outermode))
4688 return simplify_gen_unary (GET_CODE (op), outermode,
4689 XEXP (op, 0), origmode);
4692 /* A SUBREG resulting from a zero extension may fold to zero if
4693 it extracts higher bits than the ZERO_EXTEND's source provides. */
4694 if (GET_CODE (op) == ZERO_EXTEND
4695 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4696 return CONST0_RTX (outermode);
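/* Some examples, assuming a little-endian target:
   (subreg:QI (zero_extend:SI (reg:QI x)) 0) becomes (reg:QI x) itself,
   (subreg:HI (zero_extend:SI (reg:QI x)) 0) becomes
   (zero_extend:HI (reg:QI x)), and (subreg:QI (zero_extend:SI (reg:QI x)) 3)
   reads only bits above the source and folds to (const_int 0). */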
4699 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4700 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4701 the outer subreg is effectively a truncation to the original mode. */
4702 if ((GET_CODE (op) == LSHIFTRT
4703 || GET_CODE (op) == ASHIFTRT)
4704 && SCALAR_INT_MODE_P (outermode)
4705 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4706 to avoid the possibility that an outer LSHIFTRT shifts by more
4707 than the sign extension's sign_bit_copies and introduces zeros
4708 into the high bits of the result. */
4709 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4710 && GET_CODE (XEXP (op, 1)) == CONST_INT
4711 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4713 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4714 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4715 return simplify_gen_binary (ASHIFTRT, outermode,
4716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4718 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4719 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4720 the outer subreg is effectively a truncation to the original mode. */
4721 if ((GET_CODE (op) == LSHIFTRT
4722 || GET_CODE (op) == ASHIFTRT)
4723 && SCALAR_INT_MODE_P (outermode)
4724 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4725 && GET_CODE (XEXP (op, 1)) == CONST_INT
4726 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4727 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4728 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4729 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4730 return simplify_gen_binary (LSHIFTRT, outermode,
4731 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4733 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4734 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4735 the outer subreg is effectively a truncation to the original mode. */
4736 if (GET_CODE (op) == ASHIFT
4737 && SCALAR_INT_MODE_P (outermode)
4738 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4739 && GET_CODE (XEXP (op, 1)) == CONST_INT
4740 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4741 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4742 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4743 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4744 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4745 return simplify_gen_binary (ASHIFT, outermode,
4746 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
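/* For example, assuming a little-endian target,
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
   is simplified to (ashiftrt:QI (reg:QI x) (const_int 2)), because the
   outer subreg merely truncates back to the original QImode. */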
4748 return NULL_RTX;
4751 /* Make a SUBREG operation or equivalent if it folds. */
4754 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4755 enum machine_mode innermode, unsigned int byte)
4757 rtx newx;
4759 newx = simplify_subreg (outermode, op, innermode, byte);
4760 if (newx)
4761 return newx;
4763 if (GET_CODE (op) == SUBREG
4764 || GET_CODE (op) == CONCAT
4765 || GET_MODE (op) == VOIDmode)
4766 return NULL_RTX;
4768 if (validate_subreg (outermode, innermode, op, byte))
4769 return gen_rtx_SUBREG (outermode, op, byte);
4771 return NULL_RTX;
4774 /* Simplify X, an rtx expression.
4776 Return the simplified expression or NULL if no simplifications
4777 were possible.
4779 This is the preferred entry point into the simplification routines;
4780 however, we still allow passes to call the more specific routines.
4782 Right now GCC has three (yes, three) major bodies of RTL simplification
4783 code that need to be unified.
4785 1. fold_rtx in cse.c. This code uses various CSE specific
4786 information to aid in RTL simplification.
4788 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4789 it uses combine specific information to aid in RTL
4790 simplification.
4792 3. The routines in this file.
4795 Long term we want to have only one body of simplification code; to
4796 get to that state I recommend the following steps:
4798 1. Pore over fold_rtx & simplify_rtx and move into these routines
4799 any simplifications that do not depend on pass-specific state.
4801 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4802 use this routine whenever possible.
4804 3. Allow for pass dependent state to be provided to these
4805 routines and add simplifications based on the pass dependent
4806 state. Remove code from cse.c & combine.c that becomes
4807 redundant/dead.
4809 It will take time, but ultimately the compiler will be easier to
4810 maintain and improve. It's totally silly that when we add a
4811 simplification it needs to be added to 4 places (3 for RTL
4812 simplification and 1 for tree simplification). */
4815 simplify_rtx (rtx x)
4817 enum rtx_code code = GET_CODE (x);
4818 enum machine_mode mode = GET_MODE (x);
4820 switch (GET_RTX_CLASS (code))
4822 case RTX_UNARY:
4823 return simplify_unary_operation (code, mode,
4824 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4825 case RTX_COMM_ARITH:
4826 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4827 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
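/* E.g. (plus:SI (const_int 4) (reg:SI x)) is canonicalized here to
   (plus:SI (reg:SI x) (const_int 4)) and then simplified as an
   ordinary binary operation. */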
4829 /* Fall through.... */
4831 case RTX_BIN_ARITH:
4832 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4834 case RTX_TERNARY:
4835 case RTX_BITFIELD_OPS:
4836 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4837 XEXP (x, 0), XEXP (x, 1),
4838 XEXP (x, 2));
4840 case RTX_COMPARE:
4841 case RTX_COMM_COMPARE:
4842 return simplify_relational_operation (code, mode,
4843 ((GET_MODE (XEXP (x, 0))
4844 != VOIDmode)
4845 ? GET_MODE (XEXP (x, 0))
4846 : GET_MODE (XEXP (x, 1))),
4847 XEXP (x, 0),
4848 XEXP (x, 1));
4850 case RTX_EXTRA:
4851 if (code == SUBREG)
4852 return simplify_gen_subreg (mode, SUBREG_REG (x),
4853 GET_MODE (SUBREG_REG (x)),
4854 SUBREG_BYTE (x));
4855 break;
4857 case RTX_OBJ:
4858 if (code == LO_SUM)
4860 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4861 if (GET_CODE (XEXP (x, 0)) == HIGH
4862 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4863 return XEXP (x, 1);
4865 break;
4867 default:
4868 break;
4870 return NULL;