[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
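/* For instance, with a 64-bit HOST_WIDE_INT, a low word of
   0x8000000000000000 sign-extends to a high word of -1 (all ones),
   while a low word of 0x7fffffffffffffff gives a high word of 0.  */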
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
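/* For example, in SImode the most negative value 0x80000000 negates to
   itself: the arithmetic negation overflows, and gen_int_mode truncates
   the result back into the mode.  */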
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
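/* For example, in SImode this returns true only for the constant whose
   low 32 bits are 0x80000000, i.e. only the sign bit is set; any other
   value, or a mode that is not MODE_INT, yields false.  */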
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 addr = XEXP (x, 0);
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
195 else
196 return c;
199 return x;
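/* As an illustration, if X is a MEM whose address is a SYMBOL_REF into
   the constant pool and the pool entry holds the DFmode constant 1.5
   read at offset 0 in DFmode, the CONST_DOUBLE for 1.5 is returned;
   a mismatched mode or offset goes through simplify_subreg instead.  */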
202 /* Return true if X is a MEM referencing the constant pool. */
204 bool
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 rtx tem;
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 rtx tem;
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
273 if (x == old_rtx)
274 return new_rtx;
276 switch (GET_RTX_CLASS (code))
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
330 break;
332 case RTX_OBJ:
333 if (code == MEM)
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
358 break;
360 default:
361 break;
363 return x;
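/* For example, replacing (reg 100) with (const_int 6) in
   (plus:SI (reg 100) (const_int 4)) rebuilds the PLUS through
   simplify_gen_binary, which folds the result to (const_int 10).  */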
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
373 rtx trueop, tem;
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
393 rtx temp;
395 switch (code)
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
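/* Both transformations above follow from the two's-complement identity
   ~Y == -Y - 1: thus ~(X - 1) == -X and ~(-X) == X - 1.  */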
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
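/* For example, (not (and A B)) becomes (ior (not A) (not B)) and
   (not (ior A B)) becomes (and (not A) (not B)); when only one operand
   ends up negated it is placed first, matching the machine patterns.  */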
503 break;
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op) == LT
591 && XEXP (op, 1) == const0_rtx)
593 enum machine_mode inner = GET_MODE (XEXP (op, 0));
594 int isize = GET_MODE_BITSIZE (inner);
595 if (STORE_FLAG_VALUE == 1)
597 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
598 GEN_INT (isize - 1));
599 if (mode == inner)
600 return temp;
601 if (GET_MODE_BITSIZE (mode) > isize)
602 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
603 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
605 else if (STORE_FLAG_VALUE == -1)
607 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
608 GEN_INT (isize - 1));
609 if (mode == inner)
610 return temp;
611 if (GET_MODE_BITSIZE (mode) > isize)
612 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
613 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
616 break;
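/* Worked example for the LT case above: with STORE_FLAG_VALUE == 1 and
   x in SImode, (lt x 0) is 1 when x is negative and 0 otherwise, so its
   negation is -1 or 0 -- exactly what (ashiftrt x 31) produces.  */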
618 case TRUNCATE:
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
621 integer mode. */
622 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
623 break;
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op) == SIGN_EXTEND
627 || GET_CODE (op) == ZERO_EXTEND)
628 && GET_MODE (XEXP (op, 0)) == mode)
629 return XEXP (op, 0);
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op) == ABS
634 || GET_CODE (op) == NEG)
635 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
637 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (XEXP (op, 0), 0), mode);
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
642 (truncate:A X). */
643 if (GET_CODE (op) == SUBREG
644 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
645 && subreg_lowpart_p (op))
646 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
647 GET_MODE (XEXP (SUBREG_REG (op), 0)));
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes; we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
655 patterns. */
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
657 GET_MODE_BITSIZE (GET_MODE (op)))
658 ? (num_sign_bit_copies (op, GET_MODE (op))
659 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
660 - GET_MODE_BITSIZE (mode)))
661 : truncated_to_mode (mode, op))
662 && ! (GET_CODE (op) == LSHIFTRT
663 && GET_CODE (XEXP (op, 0)) == MULT))
664 return rtl_hooks.gen_lowpart_no_emit (mode, op);
666 /* A truncate of a comparison can be replaced with a subreg if
667 STORE_FLAG_VALUE permits. This is like the previous test,
668 but it works even if the comparison is done in a mode larger
669 than HOST_BITS_PER_WIDE_INT. */
670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
671 && COMPARISON_P (op)
672 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
673 return rtl_hooks.gen_lowpart_no_emit (mode, op);
674 break;
676 case FLOAT_TRUNCATE:
677 if (DECIMAL_FLOAT_MODE_P (mode))
678 break;
680 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
681 if (GET_CODE (op) == FLOAT_EXTEND
682 && GET_MODE (XEXP (op, 0)) == mode)
683 return XEXP (op, 0);
685 /* (float_truncate:SF (float_truncate:DF foo:XF))
686 = (float_truncate:SF foo:XF).
687 This may eliminate double rounding, so it is unsafe.
689 (float_truncate:SF (float_extend:XF foo:DF))
690 = (float_truncate:SF foo:DF).
692 (float_truncate:DF (float_extend:XF foo:SF))
693 = (float_extend:DF foo:SF). */
694 if ((GET_CODE (op) == FLOAT_TRUNCATE
695 && flag_unsafe_math_optimizations)
696 || GET_CODE (op) == FLOAT_EXTEND)
697 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
698 0)))
699 > GET_MODE_SIZE (mode)
700 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
701 mode,
702 XEXP (op, 0), mode);
704 /* (float_truncate (float x)) is (float x) */
705 if (GET_CODE (op) == FLOAT
706 && (flag_unsafe_math_optimizations
707 || ((unsigned)significand_size (GET_MODE (op))
708 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
709 - num_sign_bit_copies (XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)))))))
711 return simplify_gen_unary (FLOAT, mode,
712 XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)));
715 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
716 (OP:SF foo:SF) if OP is NEG or ABS. */
717 if ((GET_CODE (op) == ABS
718 || GET_CODE (op) == NEG)
719 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
720 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
721 return simplify_gen_unary (GET_CODE (op), mode,
722 XEXP (XEXP (op, 0), 0), mode);
724 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
725 is (float_truncate:SF x). */
726 if (GET_CODE (op) == SUBREG
727 && subreg_lowpart_p (op)
728 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
729 return SUBREG_REG (op);
730 break;
732 case FLOAT_EXTEND:
733 if (DECIMAL_FLOAT_MODE_P (mode))
734 break;
736 /* (float_extend (float_extend x)) is (float_extend x)
738 (float_extend (float x)) is (float x) assuming that double
739 rounding can't happen.  */
741 if (GET_CODE (op) == FLOAT_EXTEND
742 || (GET_CODE (op) == FLOAT
743 && ((unsigned)significand_size (GET_MODE (op))
744 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
745 - num_sign_bit_copies (XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)))))))
747 return simplify_gen_unary (GET_CODE (op), mode,
748 XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)));
751 break;
753 case ABS:
754 /* (abs (neg <foo>)) -> (abs <foo>) */
755 if (GET_CODE (op) == NEG)
756 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
757 GET_MODE (XEXP (op, 0)));
759 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 do nothing. */
761 if (GET_MODE (op) == VOIDmode)
762 break;
764 /* If operand is something known to be positive, ignore the ABS. */
765 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
766 || ((GET_MODE_BITSIZE (GET_MODE (op))
767 <= HOST_BITS_PER_WIDE_INT)
768 && ((nonzero_bits (op, GET_MODE (op))
769 & ((HOST_WIDE_INT) 1
770 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
771 == 0)))
772 return op;
774 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
775 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
776 return gen_rtx_NEG (mode, op);
778 break;
780 case FFS:
781 /* (ffs (*_extend <X>)) = (ffs <X>) */
782 if (GET_CODE (op) == SIGN_EXTEND
783 || GET_CODE (op) == ZERO_EXTEND)
784 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
785 GET_MODE (XEXP (op, 0)));
786 break;
788 case POPCOUNT:
789 case PARITY:
790 /* (pop* (zero_extend <X>)) = (pop* <X>) */
791 if (GET_CODE (op) == ZERO_EXTEND)
792 return simplify_gen_unary (code, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
794 break;
796 case FLOAT:
797 /* (float (sign_extend <X>)) = (float <X>). */
798 if (GET_CODE (op) == SIGN_EXTEND)
799 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 case SIGN_EXTEND:
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
807 the VAX). */
808 if (GET_CODE (op) == TRUNCATE
809 && GET_MODE (XEXP (op, 0)) == mode
810 && GET_CODE (XEXP (op, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
813 return XEXP (op, 0);
815 /* Check for a sign extension of a subreg of a promoted
816 variable, where the promotion is sign-extended, and the
817 target mode is the same as the variable's promotion. */
818 if (GET_CODE (op) == SUBREG
819 && SUBREG_PROMOTED_VAR_P (op)
820 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
821 && GET_MODE (XEXP (op, 0)) == mode)
822 return XEXP (op, 0);
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
827 && (CONSTANT_P (op)
828 || (GET_CODE (op) == SUBREG
829 && REG_P (SUBREG_REG (op))
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
833 #endif
834 break;
836 case ZERO_EXTEND:
837 /* Check for a zero extension of a subreg of a promoted
838 variable, where the promotion is zero-extended, and the
839 target mode is the same as the variable's promotion. */
840 if (GET_CODE (op) == SUBREG
841 && SUBREG_PROMOTED_VAR_P (op)
842 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
843 && GET_MODE (XEXP (op, 0)) == mode)
844 return XEXP (op, 0);
846 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
847 if (POINTERS_EXTEND_UNSIGNED > 0
848 && mode == Pmode && GET_MODE (op) == ptr_mode
849 && (CONSTANT_P (op)
850 || (GET_CODE (op) == SUBREG
851 && REG_P (SUBREG_REG (op))
852 && REG_POINTER (SUBREG_REG (op))
853 && GET_MODE (SUBREG_REG (op)) == Pmode)))
854 return convert_memory_address (Pmode, op);
855 #endif
856 break;
858 default:
859 break;
862 return 0;
865 /* Try to compute the value of a unary operation CODE whose output mode is to
866 be MODE with input operand OP whose mode was originally OP_MODE.
867 Return zero if the value cannot be computed. */
869 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
870 rtx op, enum machine_mode op_mode)
872 unsigned int width = GET_MODE_BITSIZE (mode);
874 if (code == VEC_DUPLICATE)
876 gcc_assert (VECTOR_MODE_P (mode));
877 if (GET_MODE (op) != VOIDmode)
879 if (!VECTOR_MODE_P (GET_MODE (op)))
880 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
881 else
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
883 (GET_MODE (op)));
885 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
886 || GET_CODE (op) == CONST_VECTOR)
888 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
889 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
890 rtvec v = rtvec_alloc (n_elts);
891 unsigned int i;
893 if (GET_CODE (op) != CONST_VECTOR)
894 for (i = 0; i < n_elts; i++)
895 RTVEC_ELT (v, i) = op;
896 else
898 enum machine_mode inmode = GET_MODE (op);
899 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
900 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
902 gcc_assert (in_n_elts < n_elts);
903 gcc_assert ((n_elts % in_n_elts) == 0);
904 for (i = 0; i < n_elts; i++)
905 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
907 return gen_rtx_CONST_VECTOR (mode, v);
911 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
913 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
914 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
915 enum machine_mode opmode = GET_MODE (op);
916 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
917 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
918 rtvec v = rtvec_alloc (n_elts);
919 unsigned int i;
921 gcc_assert (op_n_elts == n_elts);
922 for (i = 0; i < n_elts; i++)
924 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
925 CONST_VECTOR_ELT (op, i),
926 GET_MODE_INNER (opmode));
927 if (!x)
928 return 0;
929 RTVEC_ELT (v, i) = x;
931 return gen_rtx_CONST_VECTOR (mode, v);
934 /* The order of these tests is critical so that, for example, we don't
935 check the wrong mode (input vs. output) for a conversion operation,
936 such as FIX. At some point, this should be simplified. */
938 if (code == FLOAT && GET_MODE (op) == VOIDmode
939 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
941 HOST_WIDE_INT hv, lv;
942 REAL_VALUE_TYPE d;
944 if (GET_CODE (op) == CONST_INT)
945 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
946 else
947 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
949 REAL_VALUE_FROM_INT (d, lv, hv, mode);
950 d = real_value_truncate (mode, d);
951 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
953 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
954 && (GET_CODE (op) == CONST_DOUBLE
955 || GET_CODE (op) == CONST_INT))
957 HOST_WIDE_INT hv, lv;
958 REAL_VALUE_TYPE d;
960 if (GET_CODE (op) == CONST_INT)
961 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
962 else
963 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
965 if (op_mode == VOIDmode)
967 /* We don't know how to interpret negative-looking numbers in
968 this case, so don't try to fold those. */
969 if (hv < 0)
970 return 0;
972 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
974 else
975 hv = 0, lv &= GET_MODE_MASK (op_mode);
977 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
978 d = real_value_truncate (mode, d);
979 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
982 if (GET_CODE (op) == CONST_INT
983 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
985 HOST_WIDE_INT arg0 = INTVAL (op);
986 HOST_WIDE_INT val;
988 switch (code)
990 case NOT:
991 val = ~ arg0;
992 break;
994 case NEG:
995 val = - arg0;
996 break;
998 case ABS:
999 val = (arg0 >= 0 ? arg0 : - arg0);
1000 break;
1002 case FFS:
1003 /* Don't use ffs here. Instead, get low order bit and then its
1004 number. If arg0 is zero, this will return 0, as desired. */
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = exact_log2 (arg0 & (- arg0)) + 1;
1007 break;
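/* E.g. for arg0 == 12 (binary 1100), arg0 & -arg0 is 4, exact_log2
   gives 2, and val becomes 3: the lowest set bit is bit 2, numbered
   from 1 as FFS requires.  For arg0 == 0, exact_log2 (0) is -1, so
   val is 0, as desired.  */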
1009 case CLZ:
1010 arg0 &= GET_MODE_MASK (mode);
1011 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1013 else
1014 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1015 break;
1017 case CTZ:
1018 arg0 &= GET_MODE_MASK (mode);
1019 if (arg0 == 0)
1021 /* Even if the value at zero is undefined, we have to come
1022 up with some replacement. Seems good enough. */
1023 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1024 val = GET_MODE_BITSIZE (mode);
1026 else
1027 val = exact_log2 (arg0 & -arg0);
1028 break;
1030 case POPCOUNT:
1031 arg0 &= GET_MODE_MASK (mode);
1032 val = 0;
1033 while (arg0)
1034 val++, arg0 &= arg0 - 1;
1035 break;
1037 case PARITY:
1038 arg0 &= GET_MODE_MASK (mode);
1039 val = 0;
1040 while (arg0)
1041 val++, arg0 &= arg0 - 1;
1042 val &= 1;
1043 break;
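/* Both loops above rely on arg0 &= arg0 - 1 clearing the lowest set
   bit, so the iteration count is the population count; PARITY then
   keeps only its low bit.  E.g. 0b1011 -> 0b1010 -> 0b1000 -> 0 gives
   a popcount of 3 and a parity of 1.  */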
1045 case TRUNCATE:
1046 val = arg0;
1047 break;
1049 case ZERO_EXTEND:
1050 /* When zero-extending a CONST_INT, we need to know its
1051 original mode. */
1052 gcc_assert (op_mode != VOIDmode);
1053 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1055 /* If we were really extending the mode,
1056 we would have to distinguish between zero-extension
1057 and sign-extension. */
1058 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1059 val = arg0;
1061 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1062 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1063 else
1064 return 0;
1065 break;
1067 case SIGN_EXTEND:
1068 if (op_mode == VOIDmode)
1069 op_mode = mode;
1070 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1072 /* If we were really extending the mode,
1073 we would have to distinguish between zero-extension
1074 and sign-extension. */
1075 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1076 val = arg0;
1078 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1081 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1082 if (val
1083 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1084 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1086 else
1087 return 0;
1088 break;
1090 case SQRT:
1091 case FLOAT_EXTEND:
1092 case FLOAT_TRUNCATE:
1093 case SS_TRUNCATE:
1094 case US_TRUNCATE:
1095 case SS_NEG:
1096 return 0;
1098 default:
1099 gcc_unreachable ();
1102 return gen_int_mode (val, mode);
1105 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1106 for a DImode operation on a CONST_INT. */
1107 else if (GET_MODE (op) == VOIDmode
1108 && width <= HOST_BITS_PER_WIDE_INT * 2
1109 && (GET_CODE (op) == CONST_DOUBLE
1110 || GET_CODE (op) == CONST_INT))
1112 unsigned HOST_WIDE_INT l1, lv;
1113 HOST_WIDE_INT h1, hv;
1115 if (GET_CODE (op) == CONST_DOUBLE)
1116 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1117 else
1118 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1120 switch (code)
1122 case NOT:
1123 lv = ~ l1;
1124 hv = ~ h1;
1125 break;
1127 case NEG:
1128 neg_double (l1, h1, &lv, &hv);
1129 break;
1131 case ABS:
1132 if (h1 < 0)
1133 neg_double (l1, h1, &lv, &hv);
1134 else
1135 lv = l1, hv = h1;
1136 break;
1138 case FFS:
1139 hv = 0;
1140 if (l1 == 0)
1142 if (h1 == 0)
1143 lv = 0;
1144 else
1145 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1147 else
1148 lv = exact_log2 (l1 & -l1) + 1;
1149 break;
1151 case CLZ:
1152 hv = 0;
1153 if (h1 != 0)
1154 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1155 - HOST_BITS_PER_WIDE_INT;
1156 else if (l1 != 0)
1157 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1158 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1159 lv = GET_MODE_BITSIZE (mode);
1160 break;
1162 case CTZ:
1163 hv = 0;
1164 if (l1 != 0)
1165 lv = exact_log2 (l1 & -l1);
1166 else if (h1 != 0)
1167 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1168 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1169 lv = GET_MODE_BITSIZE (mode);
1170 break;
1172 case POPCOUNT:
1173 hv = 0;
1174 lv = 0;
1175 while (l1)
1176 lv++, l1 &= l1 - 1;
1177 while (h1)
1178 lv++, h1 &= h1 - 1;
1179 break;
1181 case PARITY:
1182 hv = 0;
1183 lv = 0;
1184 while (l1)
1185 lv++, l1 &= l1 - 1;
1186 while (h1)
1187 lv++, h1 &= h1 - 1;
1188 lv &= 1;
1189 break;
1191 case TRUNCATE:
1192 /* This is just a change-of-mode, so do nothing. */
1193 lv = l1, hv = h1;
1194 break;
1196 case ZERO_EXTEND:
1197 gcc_assert (op_mode != VOIDmode);
1199 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1200 return 0;
1202 hv = 0;
1203 lv = l1 & GET_MODE_MASK (op_mode);
1204 break;
1206 case SIGN_EXTEND:
1207 if (op_mode == VOIDmode
1208 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1209 return 0;
1210 else
1212 lv = l1 & GET_MODE_MASK (op_mode);
1213 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1214 && (lv & ((HOST_WIDE_INT) 1
1215 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1216 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1218 hv = HWI_SIGN_EXTEND (lv);
1220 break;
1222 case SQRT:
1223 return 0;
1225 default:
1226 return 0;
1229 return immed_double_const (lv, hv, mode);
1232 else if (GET_CODE (op) == CONST_DOUBLE
1233 && SCALAR_FLOAT_MODE_P (mode))
1235 REAL_VALUE_TYPE d, t;
1236 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1238 switch (code)
1240 case SQRT:
1241 if (HONOR_SNANS (mode) && real_isnan (&d))
1242 return 0;
1243 real_sqrt (&t, mode, &d);
1244 d = t;
1245 break;
1246 case ABS:
1247 d = REAL_VALUE_ABS (d);
1248 break;
1249 case NEG:
1250 d = REAL_VALUE_NEGATE (d);
1251 break;
1252 case FLOAT_TRUNCATE:
1253 d = real_value_truncate (mode, d);
1254 break;
1255 case FLOAT_EXTEND:
1256 /* All this does is change the mode. */
1257 break;
1258 case FIX:
1259 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1260 break;
1261 case NOT:
1263 long tmp[4];
1264 int i;
1266 real_to_target (tmp, &d, GET_MODE (op));
1267 for (i = 0; i < 4; i++)
1268 tmp[i] = ~tmp[i];
1269 real_from_target (&d, tmp, mode);
1270 break;
1272 default:
1273 gcc_unreachable ();
1275 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1278 else if (GET_CODE (op) == CONST_DOUBLE
1279 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1280 && GET_MODE_CLASS (mode) == MODE_INT
1281 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1283 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1284 operators are intentionally left unspecified (to ease implementation
1285 by target backends), for consistency, this routine implements the
1286 same semantics for constant folding as used by the middle-end. */
1288 /* This was formerly used only for non-IEEE float.
1289 eggert@twinsun.com says it is safe for IEEE also. */
1290 HOST_WIDE_INT xh, xl, th, tl;
1291 REAL_VALUE_TYPE x, t;
1292 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1293 switch (code)
1295 case FIX:
1296 if (REAL_VALUE_ISNAN (x))
1297 return const0_rtx;
1299 /* Test against the signed upper bound. */
1300 if (width > HOST_BITS_PER_WIDE_INT)
1302 th = ((unsigned HOST_WIDE_INT) 1
1303 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1304 tl = -1;
1306 else
1308 th = 0;
1309 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1311 real_from_integer (&t, VOIDmode, tl, th, 0);
1312 if (REAL_VALUES_LESS (t, x))
1314 xh = th;
1315 xl = tl;
1316 break;
1319 /* Test against the signed lower bound. */
1320 if (width > HOST_BITS_PER_WIDE_INT)
1322 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1323 tl = 0;
1325 else
1327 th = -1;
1328 tl = (HOST_WIDE_INT) -1 << (width - 1);
1330 real_from_integer (&t, VOIDmode, tl, th, 0);
1331 if (REAL_VALUES_LESS (x, t))
1333 xh = th;
1334 xl = tl;
1335 break;
1337 REAL_VALUE_TO_INT (&xl, &xh, x);
1338 break;
1340 case UNSIGNED_FIX:
1341 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1342 return const0_rtx;
1344 /* Test against the unsigned upper bound. */
1345 if (width == 2*HOST_BITS_PER_WIDE_INT)
1347 th = -1;
1348 tl = -1;
1350 else if (width >= HOST_BITS_PER_WIDE_INT)
1352 th = ((unsigned HOST_WIDE_INT) 1
1353 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1354 tl = -1;
1356 else
1358 th = 0;
1359 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1361 real_from_integer (&t, VOIDmode, tl, th, 1);
1362 if (REAL_VALUES_LESS (t, x))
1364 xh = th;
1365 xl = tl;
1366 break;
1369 REAL_VALUE_TO_INT (&xl, &xh, x);
1370 break;
1372 default:
1373 gcc_unreachable ();
1375 return immed_double_const (xl, xh, mode);
1378 return NULL_RTX;
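/* Typical behavior: simplify_const_unary_operation (NOT, SImode,
   GEN_INT (5), SImode) yields (const_int -6), whereas SQRT of an
   integer constant is not folded and NULL_RTX tells the caller to
   keep the original expression.  */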
1381 /* Subroutine of simplify_binary_operation to simplify a commutative,
1382 associative binary operation CODE with result mode MODE, operating
1383 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1384 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1385 canonicalization is possible. */
1387 static rtx
1388 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1389 rtx op0, rtx op1)
1391 rtx tem;
1393 /* Linearize the operator to the left. */
1394 if (GET_CODE (op1) == code)
1396 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1397 if (GET_CODE (op0) == code)
1399 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1400 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1403 /* "a op (b op c)" becomes "(b op c) op a". */
1404 if (! swap_commutative_operands_p (op1, op0))
1405 return simplify_gen_binary (code, mode, op1, op0);
1407 tem = op0;
1408 op0 = op1;
1409 op1 = tem;
1412 if (GET_CODE (op0) == code)
1414 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1415 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1417 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1418 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1421 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1422 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1423 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1424 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1425 if (tem != 0)
1426 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1428 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1432 if (tem != 0)
1433 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1436 return 0;
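/* For instance, when called for MULT with op0 = (mult (reg) (const_int 2))
   and op1 = (const_int 3), the "(a op b) op c" -> "a op (b op c)" attempt
   folds the two constants and yields (mult (reg) (const_int 6)).  */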
1440 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1441 and OP1. Return 0 if no simplification is possible.
1443 Don't use this for relational operations such as EQ or LT.
1444 Use simplify_relational_operation instead. */
1446 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1447 rtx op0, rtx op1)
1449 rtx trueop0, trueop1;
1450 rtx tem;
1452 /* Relational operations don't work here. We must know the mode
1453 of the operands in order to do the comparison correctly.
1454 Assuming a full word can give incorrect results.
1455 Consider comparing 128 with -128 in QImode. */
1456 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1457 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1459 /* Make sure the constant is second. */
1460 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1461 && swap_commutative_operands_p (op0, op1))
1463 tem = op0, op0 = op1, op1 = tem;
1466 trueop0 = avoid_constant_pool_reference (op0);
1467 trueop1 = avoid_constant_pool_reference (op1);
1469 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1470 if (tem)
1471 return tem;
1472 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1475 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1476 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1477 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1478 actual constants. */
1480 static rtx
1481 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1482 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1484 rtx tem, reversed, opleft, opright;
1485 HOST_WIDE_INT val;
1486 unsigned int width = GET_MODE_BITSIZE (mode);
1488 /* Even if we can't compute a constant result,
1489 there are some cases worth simplifying. */
1491 switch (code)
1493 case PLUS:
1494 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1495 when x is NaN, infinite, or finite and nonzero. They aren't
1496 when x is -0 and the rounding mode is not towards -infinity,
1497 since (-0) + 0 is then 0. */
1498 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1499 return op0;
1501 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1502 transformations are safe even for IEEE. */
1503 if (GET_CODE (op0) == NEG)
1504 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1505 else if (GET_CODE (op1) == NEG)
1506 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1508 /* (~a) + 1 -> -a */
1509 if (INTEGRAL_MODE_P (mode)
1510 && GET_CODE (op0) == NOT
1511 && trueop1 == const1_rtx)
1512 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1514 /* Handle both-operands-constant cases. We can only add
1515 CONST_INTs to constants since the sum of relocatable symbols
1516 can't be handled by most assemblers. Don't add CONST_INT
1517 to CONST_INT since overflow won't be computed properly if wider
1518 than HOST_BITS_PER_WIDE_INT. */
1520 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1521 && GET_CODE (op1) == CONST_INT)
1522 return plus_constant (op0, INTVAL (op1));
1523 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1524 && GET_CODE (op0) == CONST_INT)
1525 return plus_constant (op1, INTVAL (op0));
1527 /* See if this is something like X * C - X or vice versa or
1528 if the multiplication is written as a shift. If so, we can
1529 distribute and make a new multiply, shift, or maybe just
1530 have X (if C is 2 in the example above). But don't make
1531 something more expensive than we had before. */
1533 if (SCALAR_INT_MODE_P (mode))
1535 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1536 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1537 rtx lhs = op0, rhs = op1;
1539 if (GET_CODE (lhs) == NEG)
1541 coeff0l = -1;
1542 coeff0h = -1;
1543 lhs = XEXP (lhs, 0);
1545 else if (GET_CODE (lhs) == MULT
1546 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1548 coeff0l = INTVAL (XEXP (lhs, 1));
1549 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1550 lhs = XEXP (lhs, 0);
1552 else if (GET_CODE (lhs) == ASHIFT
1553 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1554 && INTVAL (XEXP (lhs, 1)) >= 0
1555 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1557 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1558 coeff0h = 0;
1559 lhs = XEXP (lhs, 0);
1562 if (GET_CODE (rhs) == NEG)
1564 coeff1l = -1;
1565 coeff1h = -1;
1566 rhs = XEXP (rhs, 0);
1568 else if (GET_CODE (rhs) == MULT
1569 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1571 coeff1l = INTVAL (XEXP (rhs, 1));
1572 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1573 rhs = XEXP (rhs, 0);
1575 else if (GET_CODE (rhs) == ASHIFT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1577 && INTVAL (XEXP (rhs, 1)) >= 0
1578 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1580 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1581 coeff1h = 0;
1582 rhs = XEXP (rhs, 0);
1585 if (rtx_equal_p (lhs, rhs))
1587 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1588 rtx coeff;
1589 unsigned HOST_WIDE_INT l;
1590 HOST_WIDE_INT h;
1592 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1593 coeff = immed_double_const (l, h, mode);
1595 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1596 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1597 ? tem : 0;
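/* Example of the distribution above: (plus (mult X (const_int 3)) X)
   has coefficients 3 and 1 on the same operand, so it becomes
   (mult X (const_int 4)) provided rtx_cost does not consider that
   more expensive than the original sum.  */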
1601 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1602 if ((GET_CODE (op1) == CONST_INT
1603 || GET_CODE (op1) == CONST_DOUBLE)
1604 && GET_CODE (op0) == XOR
1605 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1606 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1607 && mode_signbit_p (mode, op1))
1608 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1609 simplify_gen_binary (XOR, mode, op1,
1610 XEXP (op0, 1)));
1612 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1613 if (GET_CODE (op0) == MULT
1614 && GET_CODE (XEXP (op0, 0)) == NEG)
1616 rtx in1, in2;
1618 in1 = XEXP (XEXP (op0, 0), 0);
1619 in2 = XEXP (op0, 1);
1620 return simplify_gen_binary (MINUS, mode, op1,
1621 simplify_gen_binary (MULT, mode,
1622 in1, in2));
1625 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1626 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1627 is 1. */
1628 if (COMPARISON_P (op0)
1629 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1630 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1631 && (reversed = reversed_comparison (op0, mode)))
1632 return
1633 simplify_gen_unary (NEG, mode, reversed, mode);
1635 /* If one of the operands is a PLUS or a MINUS, see if we can
1636 simplify this by the associative law.
1637 Don't use the associative law for floating point.
1638 The inaccuracy makes it nonassociative,
1639 and subtle programs can break if operations are associated. */
1641 if (INTEGRAL_MODE_P (mode)
1642 && (plus_minus_operand_p (op0)
1643 || plus_minus_operand_p (op1))
1644 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1645 return tem;
1647 /* Reassociate floating point addition only when the user
1648 specifies unsafe math optimizations. */
1649 if (FLOAT_MODE_P (mode)
1650 && flag_unsafe_math_optimizations)
1652 tem = simplify_associative_operation (code, mode, op0, op1);
1653 if (tem)
1654 return tem;
1656 break;
1658 case COMPARE:
1659 #ifdef HAVE_cc0
1660 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1661 using cc0, in which case we want to leave it as a COMPARE
1662 so we can distinguish it from a register-register-copy.
1664 In IEEE floating point, x-0 is not the same as x. */
1666 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1667 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1668 && trueop1 == CONST0_RTX (mode))
1669 return op0;
1670 #endif
1672 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1673 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1674 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1675 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1677 rtx xop00 = XEXP (op0, 0);
1678 rtx xop10 = XEXP (op1, 0);
1680 #ifdef HAVE_cc0
1681 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1682 #else
1683 if (REG_P (xop00) && REG_P (xop10)
1684 && GET_MODE (xop00) == GET_MODE (xop10)
1685 && REGNO (xop00) == REGNO (xop10)
1686 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1687 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1688 #endif
1689 return xop00;
1691 break;
1693 case MINUS:
1694 /* We can't assume x-x is 0 even with non-IEEE floating point,
1695 but since it is zero except in very strange circumstances, we
1696 will treat it as zero with -funsafe-math-optimizations. */
1697 if (rtx_equal_p (trueop0, trueop1)
1698 && ! side_effects_p (op0)
1699 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1700 return CONST0_RTX (mode);
1702 /* Change subtraction from zero into negation. (0 - x) is the
1703 same as -x when x is NaN, infinite, or finite and nonzero.
1704 But if the mode has signed zeros, and does not round towards
1705 -infinity, then 0 - 0 is 0, not -0. */
1706 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1707 return simplify_gen_unary (NEG, mode, op1, mode);
1709 /* (-1 - a) is ~a. */
1710 if (trueop0 == constm1_rtx)
1711 return simplify_gen_unary (NOT, mode, op1, mode);
1713 /* Subtracting 0 has no effect unless the mode has signed zeros
1714 and supports rounding towards -infinity. In such a case,
1715 0 - 0 is -0. */
1716 if (!(HONOR_SIGNED_ZEROS (mode)
1717 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1718 && trueop1 == CONST0_RTX (mode))
1719 return op0;
1721 /* See if this is something like X * C - X or vice versa or
1722 if the multiplication is written as a shift. If so, we can
1723 distribute and make a new multiply, shift, or maybe just
1724 have X (if C is 2 in the example above). But don't make
1725 something more expensive than we had before. */
1727 if (SCALAR_INT_MODE_P (mode))
1729 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1730 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1731 rtx lhs = op0, rhs = op1;
1733 if (GET_CODE (lhs) == NEG)
1735 coeff0l = -1;
1736 coeff0h = -1;
1737 lhs = XEXP (lhs, 0);
1739 else if (GET_CODE (lhs) == MULT
1740 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1742 coeff0l = INTVAL (XEXP (lhs, 1));
1743 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1744 lhs = XEXP (lhs, 0);
1746 else if (GET_CODE (lhs) == ASHIFT
1747 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1748 && INTVAL (XEXP (lhs, 1)) >= 0
1749 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1751 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1752 coeff0h = 0;
1753 lhs = XEXP (lhs, 0);
1756 if (GET_CODE (rhs) == NEG)
1758 negcoeff1l = 1;
1759 negcoeff1h = 0;
1760 rhs = XEXP (rhs, 0);
1762 else if (GET_CODE (rhs) == MULT
1763 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1765 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1766 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1767 rhs = XEXP (rhs, 0);
1769 else if (GET_CODE (rhs) == ASHIFT
1770 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1771 && INTVAL (XEXP (rhs, 1)) >= 0
1772 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1774 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1775 negcoeff1h = -1;
1776 rhs = XEXP (rhs, 0);
1779 if (rtx_equal_p (lhs, rhs))
1781 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1782 rtx coeff;
1783 unsigned HOST_WIDE_INT l;
1784 HOST_WIDE_INT h;
1786 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1787 coeff = immed_double_const (l, h, mode);
1789 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1790 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1791 ? tem : 0;
1795 /* (a - (-b)) -> (a + b). True even for IEEE. */
1796 if (GET_CODE (op1) == NEG)
1797 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1799 /* (-x - c) may be simplified as (-c - x). */
1800 if (GET_CODE (op0) == NEG
1801 && (GET_CODE (op1) == CONST_INT
1802 || GET_CODE (op1) == CONST_DOUBLE))
1804 tem = simplify_unary_operation (NEG, mode, op1, mode);
1805 if (tem)
1806 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1809 /* Don't let a relocatable value get a negative coeff. */
1810 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1811 return simplify_gen_binary (PLUS, mode,
1812 op0,
1813 neg_const_int (mode, op1));
1815 /* (x - (x & y)) -> (x & ~y) */
1816 if (GET_CODE (op1) == AND)
1818 if (rtx_equal_p (op0, XEXP (op1, 0)))
1820 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1821 GET_MODE (XEXP (op1, 1)));
1822 return simplify_gen_binary (AND, mode, op0, tem);
1824 if (rtx_equal_p (op0, XEXP (op1, 1)))
1826 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1827 GET_MODE (XEXP (op1, 0)));
1828 return simplify_gen_binary (AND, mode, op0, tem);
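/* The identity holds bitwise: every bit of (x & y) is also set in x,
   so the subtraction never borrows and merely clears those bits, which
   is what (and x (not y)) does.  E.g. x = 0b1101, y = 0b0110:
   x - (x & y) = 13 - 4 = 9 = 0b1001 = x & ~y.  */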
1832 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1833 by reversing the comparison code if valid. */
1834 if (STORE_FLAG_VALUE == 1
1835 && trueop0 == const1_rtx
1836 && COMPARISON_P (op1)
1837 && (reversed = reversed_comparison (op1, mode)))
1838 return reversed;
1840 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1841 if (GET_CODE (op1) == MULT
1842 && GET_CODE (XEXP (op1, 0)) == NEG)
1844 rtx in1, in2;
1846 in1 = XEXP (XEXP (op1, 0), 0);
1847 in2 = XEXP (op1, 1);
1848 return simplify_gen_binary (PLUS, mode,
1849 simplify_gen_binary (MULT, mode,
1850 in1, in2),
1851 op0);
1854 /* Canonicalize (minus (neg A) (mult B C)) to
1855 (minus (mult (neg B) C) A). */
1856 if (GET_CODE (op1) == MULT
1857 && GET_CODE (op0) == NEG)
1859 rtx in1, in2;
1861 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1862 in2 = XEXP (op1, 1);
1863 return simplify_gen_binary (MINUS, mode,
1864 simplify_gen_binary (MULT, mode,
1865 in1, in2),
1866 XEXP (op0, 0));
1869 /* If one of the operands is a PLUS or a MINUS, see if we can
1870 simplify this by the associative law. This will, for example,
1871 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1872 Don't use the associative law for floating point.
1873 The inaccuracy makes it nonassociative,
1874 and subtle programs can break if operations are associated. */
1876 if (INTEGRAL_MODE_P (mode)
1877 && (plus_minus_operand_p (op0)
1878 || plus_minus_operand_p (op1))
1879 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1880 return tem;
1881 break;
1883 case MULT:
1884 if (trueop1 == constm1_rtx)
1885 return simplify_gen_unary (NEG, mode, op0, mode);
1887 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1888 x is NaN, since x * 0 is then also NaN. Nor is it valid
1889 when the mode has signed zeros, since multiplying a negative
1890 number by 0 will give -0, not 0. */
1891 if (!HONOR_NANS (mode)
1892 && !HONOR_SIGNED_ZEROS (mode)
1893 && trueop1 == CONST0_RTX (mode)
1894 && ! side_effects_p (op0))
1895 return op1;
1897 /* In IEEE floating point, x*1 is not equivalent to x for
1898 signalling NaNs. */
1899 if (!HONOR_SNANS (mode)
1900 && trueop1 == CONST1_RTX (mode))
1901 return op0;
1903 /* Convert multiply by constant power of two into shift unless
1904 we are still generating RTL. This test is a kludge. */
1905 if (GET_CODE (trueop1) == CONST_INT
1906 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1907 /* If the mode is larger than the host word size, and the
1908 uppermost bit is set, then this isn't a power of two due
1909 to implicit sign extension. */
1910 && (width <= HOST_BITS_PER_WIDE_INT
1911 || val != HOST_BITS_PER_WIDE_INT - 1))
1912 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1914 /* Likewise for multipliers wider than a word. */
1915 if (GET_CODE (trueop1) == CONST_DOUBLE
1916 && (GET_MODE (trueop1) == VOIDmode
1917 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1918 && GET_MODE (op0) == mode
1919 && CONST_DOUBLE_LOW (trueop1) == 0
1920 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1921 return simplify_gen_binary (ASHIFT, mode, op0,
1922 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
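/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)),
   since exact_log2 (8) == 3.  In the CONST_DOUBLE case, assuming a
   32-bit HOST_WIDE_INT, a multiplier with low word 0 and high word 4
   represents the value 1 << 34, so the result is
   (ashift x (const_int 34)).  */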
1924 /* x*2 is x+x and x*(-1) is -x */
1925 if (GET_CODE (trueop1) == CONST_DOUBLE
1926 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1927 && GET_MODE (op0) == mode)
1929 REAL_VALUE_TYPE d;
1930 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1932 if (REAL_VALUES_EQUAL (d, dconst2))
1933 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1935 if (!HONOR_SNANS (mode)
1936 && REAL_VALUES_EQUAL (d, dconstm1))
1937 return simplify_gen_unary (NEG, mode, op0, mode);
1940 /* Optimize -x * -x as x * x. */
1941 if (FLOAT_MODE_P (mode)
1942 && GET_CODE (op0) == NEG
1943 && GET_CODE (op1) == NEG
1944 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1945 && !side_effects_p (XEXP (op0, 0)))
1946 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1948 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1949 if (SCALAR_FLOAT_MODE_P (mode)
1950 && GET_CODE (op0) == ABS
1951 && GET_CODE (op1) == ABS
1952 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1953 && !side_effects_p (XEXP (op0, 0)))
1954 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1956 /* Reassociate multiplication, but for floating point MULTs
1957 only when the user specifies unsafe math optimizations. */
1958 if (! FLOAT_MODE_P (mode)
1959 || flag_unsafe_math_optimizations)
1961 tem = simplify_associative_operation (code, mode, op0, op1);
1962 if (tem)
1963 return tem;
1965 break;
1967 case IOR:
1968 if (trueop1 == const0_rtx)
1969 return op0;
1970 if (GET_CODE (trueop1) == CONST_INT
1971 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1972 == GET_MODE_MASK (mode)))
1973 return op1;
1974 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1975 return op0;
1976 /* A | (~A) -> -1 */
1977 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1978 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1979 && ! side_effects_p (op0)
1980 && SCALAR_INT_MODE_P (mode))
1981 return constm1_rtx;
1983 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1984 if (GET_CODE (op1) == CONST_INT
1985 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1986 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1987 return op1;
1989 /* Convert (A & B) | A to A. */
1990 if (GET_CODE (op0) == AND
1991 && (rtx_equal_p (XEXP (op0, 0), op1)
1992 || rtx_equal_p (XEXP (op0, 1), op1))
1993 && ! side_effects_p (XEXP (op0, 0))
1994 && ! side_effects_p (XEXP (op0, 1)))
1995 return op1;
1997 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1998 mode size to (rotate A CX). */
2000 if (GET_CODE (op1) == ASHIFT
2001 || GET_CODE (op1) == SUBREG)
2003 opleft = op1;
2004 opright = op0;
2006 else
2008 opright = op1;
2009 opleft = op0;
2012 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2013 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2014 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2015 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2016 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2017 == GET_MODE_BITSIZE (mode)))
2018 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
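/* Concrete instance of the rule above, assuming a 32-bit SImode:
     (ior (ashift:SI x (const_int 3)) (lshiftrt:SI x (const_int 29)))
   becomes
     (rotate:SI x (const_int 3))
   because 3 + 29 equals the mode bitsize.  */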
2020 /* Same, but for ashift that has been "simplified" to a wider mode
2021 by simplify_shift_const. */
2023 if (GET_CODE (opleft) == SUBREG
2024 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2025 && GET_CODE (opright) == LSHIFTRT
2026 && GET_CODE (XEXP (opright, 0)) == SUBREG
2027 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2028 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2029 && (GET_MODE_SIZE (GET_MODE (opleft))
2030 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2031 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2032 SUBREG_REG (XEXP (opright, 0)))
2033 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2034 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2035 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2036 == GET_MODE_BITSIZE (mode)))
2037 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2038 XEXP (SUBREG_REG (opleft), 1));
 2040 /* If we have (ior (and X C1) C2), simplify this by making
 2041 C1 as small as possible if C1 actually changes. */
2042 if (GET_CODE (op1) == CONST_INT
2043 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2044 || INTVAL (op1) > 0)
2045 && GET_CODE (op0) == AND
2046 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2047 && GET_CODE (op1) == CONST_INT
2048 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2049 return simplify_gen_binary (IOR, mode,
2050 simplify_gen_binary
2051 (AND, mode, XEXP (op0, 0),
2052 GEN_INT (INTVAL (XEXP (op0, 1))
2053 & ~INTVAL (op1))),
2054 op1);
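/* For example, (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
   (ior (and x (const_int 0xf0)) (const_int 0x0f)): the low four bits of
   the AND mask are irrelevant because the IOR forces those bits to 1
   anyway.  */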
2056 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2057 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2058 the PLUS does not affect any of the bits in OP1: then we can do
2059 the IOR as a PLUS and we can associate. This is valid if OP1
2060 can be safely shifted left C bits. */
2061 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2062 && GET_CODE (XEXP (op0, 0)) == PLUS
2063 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2064 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2065 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2067 int count = INTVAL (XEXP (op0, 1));
2068 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2070 if (mask >> count == INTVAL (trueop1)
2071 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2072 return simplify_gen_binary (ASHIFTRT, mode,
2073 plus_constant (XEXP (op0, 0), mask),
2074 XEXP (op0, 1));
2077 tem = simplify_associative_operation (code, mode, op0, op1);
2078 if (tem)
2079 return tem;
2080 break;
2082 case XOR:
2083 if (trueop1 == const0_rtx)
2084 return op0;
2085 if (GET_CODE (trueop1) == CONST_INT
2086 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2087 == GET_MODE_MASK (mode)))
2088 return simplify_gen_unary (NOT, mode, op0, mode);
2089 if (rtx_equal_p (trueop0, trueop1)
2090 && ! side_effects_p (op0)
2091 && GET_MODE_CLASS (mode) != MODE_CC)
2092 return CONST0_RTX (mode);
2094 /* Canonicalize XOR of the most significant bit to PLUS. */
2095 if ((GET_CODE (op1) == CONST_INT
2096 || GET_CODE (op1) == CONST_DOUBLE)
2097 && mode_signbit_p (mode, op1))
2098 return simplify_gen_binary (PLUS, mode, op0, op1);
2099 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2100 if ((GET_CODE (op1) == CONST_INT
2101 || GET_CODE (op1) == CONST_DOUBLE)
2102 && GET_CODE (op0) == PLUS
2103 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2104 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2105 && mode_signbit_p (mode, XEXP (op0, 1)))
2106 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2107 simplify_gen_binary (XOR, mode, op1,
2108 XEXP (op0, 1)));
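/* Adding the sign bit and XORing with the sign bit are the same
   operation modulo the mode size, because the carry out of the top bit
   is discarded.  E.g. with 8-bit values, (x + 0x80) ^ 0x03 equals
   x ^ 0x83, which is the form the rule above produces.  */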
2110 /* If we are XORing two things that have no bits in common,
 2111 convert them into an IOR. This helps to detect rotations that were
 2112 encoded with XOR rather than IOR, and possibly other simplifications. */
2114 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2115 && (nonzero_bits (op0, mode)
2116 & nonzero_bits (op1, mode)) == 0)
2117 return (simplify_gen_binary (IOR, mode, op0, op1));
2119 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2120 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2121 (NOT y). */
2123 int num_negated = 0;
2125 if (GET_CODE (op0) == NOT)
2126 num_negated++, op0 = XEXP (op0, 0);
2127 if (GET_CODE (op1) == NOT)
2128 num_negated++, op1 = XEXP (op1, 0);
2130 if (num_negated == 2)
2131 return simplify_gen_binary (XOR, mode, op0, op1);
2132 else if (num_negated == 1)
2133 return simplify_gen_unary (NOT, mode,
2134 simplify_gen_binary (XOR, mode, op0, op1),
2135 mode);
2138 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2139 correspond to a machine insn or result in further simplifications
2140 if B is a constant. */
2142 if (GET_CODE (op0) == AND
2143 && rtx_equal_p (XEXP (op0, 1), op1)
2144 && ! side_effects_p (op1))
2145 return simplify_gen_binary (AND, mode,
2146 simplify_gen_unary (NOT, mode,
2147 XEXP (op0, 0), mode),
2148 op1);
2150 else if (GET_CODE (op0) == AND
2151 && rtx_equal_p (XEXP (op0, 0), op1)
2152 && ! side_effects_p (op1))
2153 return simplify_gen_binary (AND, mode,
2154 simplify_gen_unary (NOT, mode,
2155 XEXP (op0, 1), mode),
2156 op1);
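/* Bit-by-bit check of the two rules above: where B has a 0 bit, both
   sides are 0; where B has a 1 bit, the left side is A ^ 1 == ~A and
   the right side is ~A & 1 == ~A.  For instance A == 0b1100,
   B == 0b1010 gives ((A & B) ^ B) == 0b0010 == (~A & B).  */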
2158 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2159 comparison if STORE_FLAG_VALUE is 1. */
2160 if (STORE_FLAG_VALUE == 1
2161 && trueop1 == const1_rtx
2162 && COMPARISON_P (op0)
2163 && (reversed = reversed_comparison (op0, mode)))
2164 return reversed;
2166 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2167 is (lt foo (const_int 0)), so we can perform the above
2168 simplification if STORE_FLAG_VALUE is 1. */
2170 if (STORE_FLAG_VALUE == 1
2171 && trueop1 == const1_rtx
2172 && GET_CODE (op0) == LSHIFTRT
2173 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2174 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2175 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
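/* E.g. in a 32-bit mode, (lshiftrt x (const_int 31)) is 1 exactly when
   x is negative, i.e. it is (lt x (const_int 0)) under
   STORE_FLAG_VALUE == 1; XORing that with 1 therefore yields
   (ge x (const_int 0)), which is the GE built here.  */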
2177 /* (xor (comparison foo bar) (const_int sign-bit))
2178 when STORE_FLAG_VALUE is the sign bit. */
2179 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2180 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2181 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2182 && trueop1 == const_true_rtx
2183 && COMPARISON_P (op0)
2184 && (reversed = reversed_comparison (op0, mode)))
2185 return reversed;
2187 break;
2189 tem = simplify_associative_operation (code, mode, op0, op1);
2190 if (tem)
2191 return tem;
2192 break;
2194 case AND:
2195 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2196 return trueop1;
2197 /* If we are turning off bits already known off in OP0, we need
2198 not do an AND. */
2199 if (GET_CODE (trueop1) == CONST_INT
2200 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2201 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2202 return op0;
2203 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2204 && GET_MODE_CLASS (mode) != MODE_CC)
2205 return op0;
2206 /* A & (~A) -> 0 */
2207 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2208 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2209 && ! side_effects_p (op0)
2210 && GET_MODE_CLASS (mode) != MODE_CC)
2211 return CONST0_RTX (mode);
2213 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2214 there are no nonzero bits of C outside of X's mode. */
2215 if ((GET_CODE (op0) == SIGN_EXTEND
2216 || GET_CODE (op0) == ZERO_EXTEND)
2217 && GET_CODE (trueop1) == CONST_INT
2218 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2219 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2220 & INTVAL (trueop1)) == 0)
2222 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2223 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2224 gen_int_mode (INTVAL (trueop1),
2225 imode));
2226 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
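/* For instance, (and (sign_extend:SI x:QI) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI x (const_int 0x7f))): 0x7f has no bits
   outside QImode, and after the inner AND the QImode value is
   nonnegative, so zero and sign extension agree.  */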
2229 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2230 insn (and may simplify more). */
2231 if (GET_CODE (op0) == XOR
2232 && rtx_equal_p (XEXP (op0, 0), op1)
2233 && ! side_effects_p (op1))
2234 return simplify_gen_binary (AND, mode,
2235 simplify_gen_unary (NOT, mode,
2236 XEXP (op0, 1), mode),
2237 op1);
2239 if (GET_CODE (op0) == XOR
2240 && rtx_equal_p (XEXP (op0, 1), op1)
2241 && ! side_effects_p (op1))
2242 return simplify_gen_binary (AND, mode,
2243 simplify_gen_unary (NOT, mode,
2244 XEXP (op0, 0), mode),
2245 op1);
2247 /* Similarly for (~(A ^ B)) & A. */
2248 if (GET_CODE (op0) == NOT
2249 && GET_CODE (XEXP (op0, 0)) == XOR
2250 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2254 if (GET_CODE (op0) == NOT
2255 && GET_CODE (XEXP (op0, 0)) == XOR
2256 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2260 /* Convert (A | B) & A to A. */
2261 if (GET_CODE (op0) == IOR
2262 && (rtx_equal_p (XEXP (op0, 0), op1)
2263 || rtx_equal_p (XEXP (op0, 1), op1))
2264 && ! side_effects_p (XEXP (op0, 0))
2265 && ! side_effects_p (XEXP (op0, 1)))
2266 return op1;
2268 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2269 ((A & N) + B) & M -> (A + B) & M
2270 Similarly if (N & M) == 0,
2271 ((A | N) + B) & M -> (A + B) & M
2272 and for - instead of + and/or ^ instead of |. */
2273 if (GET_CODE (trueop1) == CONST_INT
2274 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2275 && ~INTVAL (trueop1)
2276 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2277 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2279 rtx pmop[2];
2280 int which;
2282 pmop[0] = XEXP (op0, 0);
2283 pmop[1] = XEXP (op0, 1);
2285 for (which = 0; which < 2; which++)
2287 tem = pmop[which];
2288 switch (GET_CODE (tem))
2290 case AND:
2291 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2292 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2293 == INTVAL (trueop1))
2294 pmop[which] = XEXP (tem, 0);
2295 break;
2296 case IOR:
2297 case XOR:
2298 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2300 pmop[which] = XEXP (tem, 0);
2301 break;
2302 default:
2303 break;
2307 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2309 tem = simplify_gen_binary (GET_CODE (op0), mode,
2310 pmop[0], pmop[1]);
2311 return simplify_gen_binary (code, mode, tem, op1);
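/* For example, with M == 0xff (a low-bit mask) and N == 0xffff,
   ((A & 0xffff) + B) & 0xff equals (A + B) & 0xff: the bits masked off
   from A cannot influence the low eight bits of the sum.  Likewise
   ((A | 0xff00) + B) & 0xff equals (A + B) & 0xff, since the IOR
   touches none of the low eight bits.  */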
2314 tem = simplify_associative_operation (code, mode, op0, op1);
2315 if (tem)
2316 return tem;
2317 break;
2319 case UDIV:
2320 /* 0/x is 0 (or x&0 if x has side-effects). */
2321 if (trueop0 == CONST0_RTX (mode))
2323 if (side_effects_p (op1))
2324 return simplify_gen_binary (AND, mode, op1, trueop0);
2325 return trueop0;
2327 /* x/1 is x. */
2328 if (trueop1 == CONST1_RTX (mode))
2329 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2330 /* Convert divide by power of two into shift. */
2331 if (GET_CODE (trueop1) == CONST_INT
2332 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2333 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2334 break;
2336 case DIV:
2337 /* Handle floating point and integers separately. */
2338 if (SCALAR_FLOAT_MODE_P (mode))
2340 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2341 safe for modes with NaNs, since 0.0 / 0.0 will then be
2342 NaN rather than 0.0. Nor is it safe for modes with signed
2343 zeros, since dividing 0 by a negative number gives -0.0 */
2344 if (trueop0 == CONST0_RTX (mode)
2345 && !HONOR_NANS (mode)
2346 && !HONOR_SIGNED_ZEROS (mode)
2347 && ! side_effects_p (op1))
2348 return op0;
2349 /* x/1.0 is x. */
2350 if (trueop1 == CONST1_RTX (mode)
2351 && !HONOR_SNANS (mode))
2352 return op0;
2354 if (GET_CODE (trueop1) == CONST_DOUBLE
2355 && trueop1 != CONST0_RTX (mode))
2357 REAL_VALUE_TYPE d;
2358 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2360 /* x/-1.0 is -x. */
2361 if (REAL_VALUES_EQUAL (d, dconstm1)
2362 && !HONOR_SNANS (mode))
2363 return simplify_gen_unary (NEG, mode, op0, mode);
2365 /* Change FP division by a constant into multiplication.
2366 Only do this with -funsafe-math-optimizations. */
2367 if (flag_unsafe_math_optimizations
2368 && !REAL_VALUES_EQUAL (d, dconst0))
2370 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2371 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2372 return simplify_gen_binary (MULT, mode, op0, tem);
2376 else
2378 /* 0/x is 0 (or x&0 if x has side-effects). */
2379 if (trueop0 == CONST0_RTX (mode))
2381 if (side_effects_p (op1))
2382 return simplify_gen_binary (AND, mode, op1, trueop0);
2383 return trueop0;
2385 /* x/1 is x. */
2386 if (trueop1 == CONST1_RTX (mode))
2387 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2388 /* x/-1 is -x. */
2389 if (trueop1 == constm1_rtx)
2391 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2392 return simplify_gen_unary (NEG, mode, x, mode);
2395 break;
2397 case UMOD:
2398 /* 0%x is 0 (or x&0 if x has side-effects). */
2399 if (trueop0 == CONST0_RTX (mode))
2401 if (side_effects_p (op1))
2402 return simplify_gen_binary (AND, mode, op1, trueop0);
2403 return trueop0;
 2405 /* x%1 is 0 (or x&0 if x has side-effects). */
2406 if (trueop1 == CONST1_RTX (mode))
2408 if (side_effects_p (op0))
2409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2410 return CONST0_RTX (mode);
2412 /* Implement modulus by power of two as AND. */
2413 if (GET_CODE (trueop1) == CONST_INT
2414 && exact_log2 (INTVAL (trueop1)) > 0)
2415 return simplify_gen_binary (AND, mode, op0,
2416 GEN_INT (INTVAL (op1) - 1));
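/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)); for an
   unsigned x, 29 % 8 == 5 == 29 & 7.  This only holds for unsigned
   modulus, which is why it lives in the UMOD case.  */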
2417 break;
2419 case MOD:
2420 /* 0%x is 0 (or x&0 if x has side-effects). */
2421 if (trueop0 == CONST0_RTX (mode))
2423 if (side_effects_p (op1))
2424 return simplify_gen_binary (AND, mode, op1, trueop0);
2425 return trueop0;
2427 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2428 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2430 if (side_effects_p (op0))
2431 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2432 return CONST0_RTX (mode);
2434 break;
2436 case ROTATERT:
2437 case ROTATE:
2438 case ASHIFTRT:
2439 if (trueop1 == CONST0_RTX (mode))
2440 return op0;
2441 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2442 return op0;
2443 /* Rotating ~0 always results in ~0. */
2444 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2445 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2446 && ! side_effects_p (op1))
2447 return op0;
2448 break;
2450 case ASHIFT:
2451 case SS_ASHIFT:
2452 if (trueop1 == CONST0_RTX (mode))
2453 return op0;
2454 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2455 return op0;
2456 break;
2458 case LSHIFTRT:
2459 if (trueop1 == CONST0_RTX (mode))
2460 return op0;
2461 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2462 return op0;
2463 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2464 if (GET_CODE (op0) == CLZ
2465 && GET_CODE (trueop1) == CONST_INT
2466 && STORE_FLAG_VALUE == 1
2467 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2469 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2470 unsigned HOST_WIDE_INT zero_val = 0;
2472 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2473 && zero_val == GET_MODE_BITSIZE (imode)
2474 && INTVAL (trueop1) == exact_log2 (zero_val))
2475 return simplify_gen_relational (EQ, mode, imode,
2476 XEXP (op0, 0), const0_rtx);
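/* E.g. in a 32-bit mode where CLZ is defined to return 32 for a zero
   input, (lshiftrt (clz x) (const_int 5)) is 1 exactly when
   (clz x) == 32, i.e. when x == 0, so it becomes
   (eq x (const_int 0)).  */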
2478 break;
2480 case SMIN:
2481 if (width <= HOST_BITS_PER_WIDE_INT
2482 && GET_CODE (trueop1) == CONST_INT
2483 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2484 && ! side_effects_p (op0))
2485 return op1;
2486 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2487 return op0;
2488 tem = simplify_associative_operation (code, mode, op0, op1);
2489 if (tem)
2490 return tem;
2491 break;
2493 case SMAX:
2494 if (width <= HOST_BITS_PER_WIDE_INT
2495 && GET_CODE (trueop1) == CONST_INT
2496 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2497 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2498 && ! side_effects_p (op0))
2499 return op1;
2500 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2501 return op0;
2502 tem = simplify_associative_operation (code, mode, op0, op1);
2503 if (tem)
2504 return tem;
2505 break;
2507 case UMIN:
2508 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2509 return op1;
2510 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2511 return op0;
2512 tem = simplify_associative_operation (code, mode, op0, op1);
2513 if (tem)
2514 return tem;
2515 break;
2517 case UMAX:
2518 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2519 return op1;
2520 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2521 return op0;
2522 tem = simplify_associative_operation (code, mode, op0, op1);
2523 if (tem)
2524 return tem;
2525 break;
2527 case SS_PLUS:
2528 case US_PLUS:
2529 case SS_MINUS:
2530 case US_MINUS:
2531 /* ??? There are simplifications that can be done. */
2532 return 0;
2534 case VEC_SELECT:
2535 if (!VECTOR_MODE_P (mode))
2537 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2538 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2539 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2540 gcc_assert (XVECLEN (trueop1, 0) == 1);
2541 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2543 if (GET_CODE (trueop0) == CONST_VECTOR)
2544 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2545 (trueop1, 0, 0)));
2547 else
2549 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2550 gcc_assert (GET_MODE_INNER (mode)
2551 == GET_MODE_INNER (GET_MODE (trueop0)));
2552 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2554 if (GET_CODE (trueop0) == CONST_VECTOR)
2556 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2557 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2558 rtvec v = rtvec_alloc (n_elts);
2559 unsigned int i;
2561 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2562 for (i = 0; i < n_elts; i++)
2564 rtx x = XVECEXP (trueop1, 0, i);
2566 gcc_assert (GET_CODE (x) == CONST_INT);
2567 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2568 INTVAL (x));
2571 return gen_rtx_CONST_VECTOR (mode, v);
2575 if (XVECLEN (trueop1, 0) == 1
2576 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2577 && GET_CODE (trueop0) == VEC_CONCAT)
2579 rtx vec = trueop0;
2580 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2582 /* Try to find the element in the VEC_CONCAT. */
2583 while (GET_MODE (vec) != mode
2584 && GET_CODE (vec) == VEC_CONCAT)
2586 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2587 if (offset < vec_size)
2588 vec = XEXP (vec, 0);
2589 else
2591 offset -= vec_size;
2592 vec = XEXP (vec, 1);
2594 vec = avoid_constant_pool_reference (vec);
2597 if (GET_MODE (vec) == mode)
2598 return vec;
2601 return 0;
2602 case VEC_CONCAT:
2604 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2605 ? GET_MODE (trueop0)
2606 : GET_MODE_INNER (mode));
2607 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2608 ? GET_MODE (trueop1)
2609 : GET_MODE_INNER (mode));
2611 gcc_assert (VECTOR_MODE_P (mode));
2612 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2613 == GET_MODE_SIZE (mode));
2615 if (VECTOR_MODE_P (op0_mode))
2616 gcc_assert (GET_MODE_INNER (mode)
2617 == GET_MODE_INNER (op0_mode));
2618 else
2619 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2621 if (VECTOR_MODE_P (op1_mode))
2622 gcc_assert (GET_MODE_INNER (mode)
2623 == GET_MODE_INNER (op1_mode));
2624 else
2625 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2627 if ((GET_CODE (trueop0) == CONST_VECTOR
2628 || GET_CODE (trueop0) == CONST_INT
2629 || GET_CODE (trueop0) == CONST_DOUBLE)
2630 && (GET_CODE (trueop1) == CONST_VECTOR
2631 || GET_CODE (trueop1) == CONST_INT
2632 || GET_CODE (trueop1) == CONST_DOUBLE))
2634 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2635 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2636 rtvec v = rtvec_alloc (n_elts);
2637 unsigned int i;
2638 unsigned in_n_elts = 1;
2640 if (VECTOR_MODE_P (op0_mode))
2641 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2642 for (i = 0; i < n_elts; i++)
2644 if (i < in_n_elts)
2646 if (!VECTOR_MODE_P (op0_mode))
2647 RTVEC_ELT (v, i) = trueop0;
2648 else
2649 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2651 else
2653 if (!VECTOR_MODE_P (op1_mode))
2654 RTVEC_ELT (v, i) = trueop1;
2655 else
2656 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2657 i - in_n_elts);
2661 return gen_rtx_CONST_VECTOR (mode, v);
2664 return 0;
2666 default:
2667 gcc_unreachable ();
2670 return 0;
2674 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2675 rtx op0, rtx op1)
2677 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2678 HOST_WIDE_INT val;
2679 unsigned int width = GET_MODE_BITSIZE (mode);
2681 if (VECTOR_MODE_P (mode)
2682 && code != VEC_CONCAT
2683 && GET_CODE (op0) == CONST_VECTOR
2684 && GET_CODE (op1) == CONST_VECTOR)
2686 unsigned n_elts = GET_MODE_NUNITS (mode);
2687 enum machine_mode op0mode = GET_MODE (op0);
2688 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2689 enum machine_mode op1mode = GET_MODE (op1);
2690 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2691 rtvec v = rtvec_alloc (n_elts);
2692 unsigned int i;
2694 gcc_assert (op0_n_elts == n_elts);
2695 gcc_assert (op1_n_elts == n_elts);
2696 for (i = 0; i < n_elts; i++)
2698 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2699 CONST_VECTOR_ELT (op0, i),
2700 CONST_VECTOR_ELT (op1, i));
2701 if (!x)
2702 return 0;
2703 RTVEC_ELT (v, i) = x;
2706 return gen_rtx_CONST_VECTOR (mode, v);
2709 if (VECTOR_MODE_P (mode)
2710 && code == VEC_CONCAT
2711 && CONSTANT_P (op0) && CONSTANT_P (op1))
2713 unsigned n_elts = GET_MODE_NUNITS (mode);
2714 rtvec v = rtvec_alloc (n_elts);
2716 gcc_assert (n_elts >= 2);
2717 if (n_elts == 2)
2719 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2720 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2722 RTVEC_ELT (v, 0) = op0;
2723 RTVEC_ELT (v, 1) = op1;
2725 else
2727 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2728 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2729 unsigned i;
2731 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2732 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2733 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2735 for (i = 0; i < op0_n_elts; ++i)
2736 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2737 for (i = 0; i < op1_n_elts; ++i)
2738 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2741 return gen_rtx_CONST_VECTOR (mode, v);
2744 if (SCALAR_FLOAT_MODE_P (mode)
2745 && GET_CODE (op0) == CONST_DOUBLE
2746 && GET_CODE (op1) == CONST_DOUBLE
2747 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2749 if (code == AND
2750 || code == IOR
2751 || code == XOR)
2753 long tmp0[4];
2754 long tmp1[4];
2755 REAL_VALUE_TYPE r;
2756 int i;
2758 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2759 GET_MODE (op0));
2760 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2761 GET_MODE (op1));
2762 for (i = 0; i < 4; i++)
2764 switch (code)
2766 case AND:
2767 tmp0[i] &= tmp1[i];
2768 break;
2769 case IOR:
2770 tmp0[i] |= tmp1[i];
2771 break;
2772 case XOR:
2773 tmp0[i] ^= tmp1[i];
2774 break;
2775 default:
2776 gcc_unreachable ();
2779 real_from_target (&r, tmp0, mode);
2780 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2782 else
2784 REAL_VALUE_TYPE f0, f1, value, result;
2785 bool inexact;
2787 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2788 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2789 real_convert (&f0, mode, &f0);
2790 real_convert (&f1, mode, &f1);
2792 if (HONOR_SNANS (mode)
2793 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2794 return 0;
2796 if (code == DIV
2797 && REAL_VALUES_EQUAL (f1, dconst0)
2798 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2799 return 0;
2801 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2802 && flag_trapping_math
2803 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2805 int s0 = REAL_VALUE_NEGATIVE (f0);
2806 int s1 = REAL_VALUE_NEGATIVE (f1);
2808 switch (code)
2810 case PLUS:
2811 /* Inf + -Inf = NaN plus exception. */
2812 if (s0 != s1)
2813 return 0;
2814 break;
2815 case MINUS:
2816 /* Inf - Inf = NaN plus exception. */
2817 if (s0 == s1)
2818 return 0;
2819 break;
2820 case DIV:
2821 /* Inf / Inf = NaN plus exception. */
2822 return 0;
2823 default:
2824 break;
2828 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2829 && flag_trapping_math
2830 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2831 || (REAL_VALUE_ISINF (f1)
2832 && REAL_VALUES_EQUAL (f0, dconst0))))
2833 /* Inf * 0 = NaN plus exception. */
2834 return 0;
2836 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2837 &f0, &f1);
2838 real_convert (&result, mode, &value);
2840 /* Don't constant fold this floating point operation if
 2841 the result has overflowed and flag_trapping_math is set. */
2843 if (flag_trapping_math
2844 && MODE_HAS_INFINITIES (mode)
2845 && REAL_VALUE_ISINF (result)
2846 && !REAL_VALUE_ISINF (f0)
2847 && !REAL_VALUE_ISINF (f1))
2848 /* Overflow plus exception. */
2849 return 0;
2851 /* Don't constant fold this floating point operation if the
 2852 result may depend upon the run-time rounding mode and
2853 flag_rounding_math is set, or if GCC's software emulation
2854 is unable to accurately represent the result. */
2856 if ((flag_rounding_math
2857 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2858 && !flag_unsafe_math_optimizations))
2859 && (inexact || !real_identical (&result, &value)))
2860 return NULL_RTX;
2862 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2866 /* We can fold some multi-word operations. */
2867 if (GET_MODE_CLASS (mode) == MODE_INT
2868 && width == HOST_BITS_PER_WIDE_INT * 2
2869 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2870 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2872 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2873 HOST_WIDE_INT h1, h2, hv, ht;
2875 if (GET_CODE (op0) == CONST_DOUBLE)
2876 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2877 else
2878 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2880 if (GET_CODE (op1) == CONST_DOUBLE)
2881 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2882 else
2883 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2885 switch (code)
2887 case MINUS:
2888 /* A - B == A + (-B). */
2889 neg_double (l2, h2, &lv, &hv);
2890 l2 = lv, h2 = hv;
2892 /* Fall through.... */
2894 case PLUS:
2895 add_double (l1, h1, l2, h2, &lv, &hv);
2896 break;
2898 case MULT:
2899 mul_double (l1, h1, l2, h2, &lv, &hv);
2900 break;
2902 case DIV:
2903 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2904 &lv, &hv, &lt, &ht))
2905 return 0;
2906 break;
2908 case MOD:
2909 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2910 &lt, &ht, &lv, &hv))
2911 return 0;
2912 break;
2914 case UDIV:
2915 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2916 &lv, &hv, &lt, &ht))
2917 return 0;
2918 break;
2920 case UMOD:
2921 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2922 &lt, &ht, &lv, &hv))
2923 return 0;
2924 break;
2926 case AND:
2927 lv = l1 & l2, hv = h1 & h2;
2928 break;
2930 case IOR:
2931 lv = l1 | l2, hv = h1 | h2;
2932 break;
2934 case XOR:
2935 lv = l1 ^ l2, hv = h1 ^ h2;
2936 break;
2938 case SMIN:
2939 if (h1 < h2
2940 || (h1 == h2
2941 && ((unsigned HOST_WIDE_INT) l1
2942 < (unsigned HOST_WIDE_INT) l2)))
2943 lv = l1, hv = h1;
2944 else
2945 lv = l2, hv = h2;
2946 break;
2948 case SMAX:
2949 if (h1 > h2
2950 || (h1 == h2
2951 && ((unsigned HOST_WIDE_INT) l1
2952 > (unsigned HOST_WIDE_INT) l2)))
2953 lv = l1, hv = h1;
2954 else
2955 lv = l2, hv = h2;
2956 break;
2958 case UMIN:
2959 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2960 || (h1 == h2
2961 && ((unsigned HOST_WIDE_INT) l1
2962 < (unsigned HOST_WIDE_INT) l2)))
2963 lv = l1, hv = h1;
2964 else
2965 lv = l2, hv = h2;
2966 break;
2968 case UMAX:
2969 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2970 || (h1 == h2
2971 && ((unsigned HOST_WIDE_INT) l1
2972 > (unsigned HOST_WIDE_INT) l2)))
2973 lv = l1, hv = h1;
2974 else
2975 lv = l2, hv = h2;
2976 break;
2978 case LSHIFTRT: case ASHIFTRT:
2979 case ASHIFT:
2980 case ROTATE: case ROTATERT:
2981 if (SHIFT_COUNT_TRUNCATED)
2982 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2984 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2985 return 0;
2987 if (code == LSHIFTRT || code == ASHIFTRT)
2988 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2989 code == ASHIFTRT);
2990 else if (code == ASHIFT)
2991 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2992 else if (code == ROTATE)
2993 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2994 else /* code == ROTATERT */
2995 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2996 break;
2998 default:
2999 return 0;
3002 return immed_double_const (lv, hv, mode);
3005 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3006 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3008 /* Get the integer argument values in two forms:
3009 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3011 arg0 = INTVAL (op0);
3012 arg1 = INTVAL (op1);
3014 if (width < HOST_BITS_PER_WIDE_INT)
3016 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3017 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3019 arg0s = arg0;
3020 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3021 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3023 arg1s = arg1;
3024 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3025 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3027 else
3029 arg0s = arg0;
3030 arg1s = arg1;
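/* For example, with width == 8 and a constant whose low byte is 0xf0,
   ARG0 holds the zero-extended value 240 while ARG0S holds the
   sign-extended value -16; the signed and unsigned cases below use
   whichever form they need.  */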
3033 /* Compute the value of the arithmetic. */
3035 switch (code)
3037 case PLUS:
3038 val = arg0s + arg1s;
3039 break;
3041 case MINUS:
3042 val = arg0s - arg1s;
3043 break;
3045 case MULT:
3046 val = arg0s * arg1s;
3047 break;
3049 case DIV:
3050 if (arg1s == 0
3051 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3052 && arg1s == -1))
3053 return 0;
3054 val = arg0s / arg1s;
3055 break;
3057 case MOD:
3058 if (arg1s == 0
3059 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3060 && arg1s == -1))
3061 return 0;
3062 val = arg0s % arg1s;
3063 break;
3065 case UDIV:
3066 if (arg1 == 0
3067 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3068 && arg1s == -1))
3069 return 0;
3070 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3071 break;
3073 case UMOD:
3074 if (arg1 == 0
3075 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3076 && arg1s == -1))
3077 return 0;
3078 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3079 break;
3081 case AND:
3082 val = arg0 & arg1;
3083 break;
3085 case IOR:
3086 val = arg0 | arg1;
3087 break;
3089 case XOR:
3090 val = arg0 ^ arg1;
3091 break;
3093 case LSHIFTRT:
3094 case ASHIFT:
3095 case ASHIFTRT:
3096 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3097 the value is in range. We can't return any old value for
3098 out-of-range arguments because either the middle-end (via
3099 shift_truncation_mask) or the back-end might be relying on
3100 target-specific knowledge. Nor can we rely on
3101 shift_truncation_mask, since the shift might not be part of an
3102 ashlM3, lshrM3 or ashrM3 instruction. */
3103 if (SHIFT_COUNT_TRUNCATED)
3104 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3105 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3106 return 0;
3108 val = (code == ASHIFT
3109 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3110 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3112 /* Sign-extend the result for arithmetic right shifts. */
3113 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3114 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3115 break;
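/* E.g. with width == 8, arg0 == 0xf0 (so arg0s == -16) and an ASHIFTRT
   count of 2: the logical shift gives 0x3c, and the sign-extension step
   ORs in the bits from position 6 upward, making the low byte 0xfc,
   i.e. -4, as expected for -16 >> 2.  */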
3117 case ROTATERT:
3118 if (arg1 < 0)
3119 return 0;
3121 arg1 %= width;
3122 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3123 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3124 break;
3126 case ROTATE:
3127 if (arg1 < 0)
3128 return 0;
3130 arg1 %= width;
3131 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3132 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3133 break;
3135 case COMPARE:
3136 /* Do nothing here. */
3137 return 0;
3139 case SMIN:
3140 val = arg0s <= arg1s ? arg0s : arg1s;
3141 break;
3143 case UMIN:
3144 val = ((unsigned HOST_WIDE_INT) arg0
3145 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3146 break;
3148 case SMAX:
3149 val = arg0s > arg1s ? arg0s : arg1s;
3150 break;
3152 case UMAX:
3153 val = ((unsigned HOST_WIDE_INT) arg0
3154 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3155 break;
3157 case SS_PLUS:
3158 case US_PLUS:
3159 case SS_MINUS:
3160 case US_MINUS:
3161 case SS_ASHIFT:
3162 /* ??? There are simplifications that can be done. */
3163 return 0;
3165 default:
3166 gcc_unreachable ();
3169 return gen_int_mode (val, mode);
3172 return NULL_RTX;
3177 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3178 PLUS or MINUS.
 3180 Rather than test for specific cases, we do this by a brute-force method
3181 and do all possible simplifications until no more changes occur. Then
3182 we rebuild the operation. */
3184 struct simplify_plus_minus_op_data
3186 rtx op;
3187 short neg;
3190 static int
3191 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3193 const struct simplify_plus_minus_op_data *d1 = p1;
3194 const struct simplify_plus_minus_op_data *d2 = p2;
3195 int result;
3197 result = (commutative_operand_precedence (d2->op)
3198 - commutative_operand_precedence (d1->op));
3199 if (result)
3200 return result;
3202 /* Group together equal REGs to do more simplification. */
3203 if (REG_P (d1->op) && REG_P (d2->op))
3204 return REGNO (d1->op) - REGNO (d2->op);
3205 else
3206 return 0;
3209 static rtx
3210 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3211 rtx op1)
3213 struct simplify_plus_minus_op_data ops[8];
3214 rtx result, tem;
3215 int n_ops = 2, input_ops = 2;
3216 int changed, n_constants = 0, canonicalized = 0;
3217 int i, j;
3219 memset (ops, 0, sizeof ops);
3221 /* Set up the two operands and then expand them until nothing has been
3222 changed. If we run out of room in our array, give up; this should
3223 almost never happen. */
3225 ops[0].op = op0;
3226 ops[0].neg = 0;
3227 ops[1].op = op1;
3228 ops[1].neg = (code == MINUS);
3232 changed = 0;
3234 for (i = 0; i < n_ops; i++)
3236 rtx this_op = ops[i].op;
3237 int this_neg = ops[i].neg;
3238 enum rtx_code this_code = GET_CODE (this_op);
3240 switch (this_code)
3242 case PLUS:
3243 case MINUS:
3244 if (n_ops == 7)
3245 return NULL_RTX;
3247 ops[n_ops].op = XEXP (this_op, 1);
3248 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3249 n_ops++;
3251 ops[i].op = XEXP (this_op, 0);
3252 input_ops++;
3253 changed = 1;
3254 canonicalized |= this_neg;
3255 break;
3257 case NEG:
3258 ops[i].op = XEXP (this_op, 0);
3259 ops[i].neg = ! this_neg;
3260 changed = 1;
3261 canonicalized = 1;
3262 break;
3264 case CONST:
3265 if (n_ops < 7
3266 && GET_CODE (XEXP (this_op, 0)) == PLUS
3267 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3268 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3270 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3271 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3272 ops[n_ops].neg = this_neg;
3273 n_ops++;
3274 changed = 1;
3275 canonicalized = 1;
3277 break;
3279 case NOT:
3280 /* ~a -> (-a - 1) */
3281 if (n_ops != 7)
3283 ops[n_ops].op = constm1_rtx;
3284 ops[n_ops++].neg = this_neg;
3285 ops[i].op = XEXP (this_op, 0);
3286 ops[i].neg = !this_neg;
3287 changed = 1;
3288 canonicalized = 1;
3290 break;
3292 case CONST_INT:
3293 n_constants++;
3294 if (this_neg)
3296 ops[i].op = neg_const_int (mode, this_op);
3297 ops[i].neg = 0;
3298 changed = 1;
3299 canonicalized = 1;
3301 break;
3303 default:
3304 break;
3308 while (changed);
3310 if (n_constants > 1)
3311 canonicalized = 1;
3313 gcc_assert (n_ops >= 2);
3315 /* If we only have two operands, we can avoid the loops. */
3316 if (n_ops == 2)
3318 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3319 rtx lhs, rhs;
3321 /* Get the two operands. Be careful with the order, especially for
3322 the cases where code == MINUS. */
3323 if (ops[0].neg && ops[1].neg)
3325 lhs = gen_rtx_NEG (mode, ops[0].op);
3326 rhs = ops[1].op;
3328 else if (ops[0].neg)
3330 lhs = ops[1].op;
3331 rhs = ops[0].op;
3333 else
3335 lhs = ops[0].op;
3336 rhs = ops[1].op;
3339 return simplify_const_binary_operation (code, mode, lhs, rhs);
3342 /* Now simplify each pair of operands until nothing changes. */
3345 /* Insertion sort is good enough for an eight-element array. */
3346 for (i = 1; i < n_ops; i++)
3348 struct simplify_plus_minus_op_data save;
3349 j = i - 1;
3350 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3351 continue;
3353 canonicalized = 1;
3354 save = ops[i];
3356 ops[j + 1] = ops[j];
3357 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3358 ops[j + 1] = save;
3361 /* This is only useful the first time through. */
3362 if (!canonicalized)
3363 return NULL_RTX;
3365 changed = 0;
3366 for (i = n_ops - 1; i > 0; i--)
3367 for (j = i - 1; j >= 0; j--)
3369 rtx lhs = ops[j].op, rhs = ops[i].op;
3370 int lneg = ops[j].neg, rneg = ops[i].neg;
3372 if (lhs != 0 && rhs != 0)
3374 enum rtx_code ncode = PLUS;
3376 if (lneg != rneg)
3378 ncode = MINUS;
3379 if (lneg)
3380 tem = lhs, lhs = rhs, rhs = tem;
3382 else if (swap_commutative_operands_p (lhs, rhs))
3383 tem = lhs, lhs = rhs, rhs = tem;
3385 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3386 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3388 rtx tem_lhs, tem_rhs;
3390 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3391 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3392 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3394 if (tem && !CONSTANT_P (tem))
3395 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3397 else
3398 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3400 /* Reject "simplifications" that just wrap the two
3401 arguments in a CONST. Failure to do so can result
3402 in infinite recursion with simplify_binary_operation
3403 when it calls us to simplify CONST operations. */
3404 if (tem
3405 && ! (GET_CODE (tem) == CONST
3406 && GET_CODE (XEXP (tem, 0)) == ncode
3407 && XEXP (XEXP (tem, 0), 0) == lhs
3408 && XEXP (XEXP (tem, 0), 1) == rhs))
3410 lneg &= rneg;
3411 if (GET_CODE (tem) == NEG)
3412 tem = XEXP (tem, 0), lneg = !lneg;
3413 if (GET_CODE (tem) == CONST_INT && lneg)
3414 tem = neg_const_int (mode, tem), lneg = 0;
3416 ops[i].op = tem;
3417 ops[i].neg = lneg;
3418 ops[j].op = NULL_RTX;
3419 changed = 1;
3424 /* Pack all the operands to the lower-numbered entries. */
3425 for (i = 0, j = 0; j < n_ops; j++)
3426 if (ops[j].op)
3428 ops[i] = ops[j];
3429 i++;
3431 n_ops = i;
3433 while (changed);
3435 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3436 if (n_ops == 2
3437 && GET_CODE (ops[1].op) == CONST_INT
3438 && CONSTANT_P (ops[0].op)
3439 && ops[0].neg)
3440 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3442 /* We suppressed creation of trivial CONST expressions in the
3443 combination loop to avoid recursion. Create one manually now.
3444 The combination loop should have ensured that there is exactly
3445 one CONST_INT, and the sort will have ensured that it is last
3446 in the array and that any other constant will be next-to-last. */
3448 if (n_ops > 1
3449 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3450 && CONSTANT_P (ops[n_ops - 2].op))
3452 rtx value = ops[n_ops - 1].op;
3453 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3454 value = neg_const_int (mode, value);
3455 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3456 n_ops--;
3459 /* Put a non-negated operand first, if possible. */
3461 for (i = 0; i < n_ops && ops[i].neg; i++)
3462 continue;
3463 if (i == n_ops)
3464 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3465 else if (i != 0)
3467 tem = ops[0].op;
3468 ops[0] = ops[i];
3469 ops[i].op = tem;
3470 ops[i].neg = 1;
3473 /* Now make the result by performing the requested operations. */
3474 result = ops[0].op;
3475 for (i = 1; i < n_ops; i++)
3476 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3477 mode, result, ops[i].op);
3479 return result;
3482 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3483 static bool
3484 plus_minus_operand_p (rtx x)
3486 return GET_CODE (x) == PLUS
3487 || GET_CODE (x) == MINUS
3488 || (GET_CODE (x) == CONST
3489 && GET_CODE (XEXP (x, 0)) == PLUS
3490 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3491 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3494 /* Like simplify_binary_operation except used for relational operators.
 3495 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 3496 not both be VOIDmode as well.
 3498 CMP_MODE specifies the mode in which the comparison is done, so it is
3499 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3500 the operands or, if both are VOIDmode, the operands are compared in
3501 "infinite precision". */
3503 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3504 enum machine_mode cmp_mode, rtx op0, rtx op1)
3506 rtx tem, trueop0, trueop1;
3508 if (cmp_mode == VOIDmode)
3509 cmp_mode = GET_MODE (op0);
3510 if (cmp_mode == VOIDmode)
3511 cmp_mode = GET_MODE (op1);
3513 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3514 if (tem)
3516 if (SCALAR_FLOAT_MODE_P (mode))
3518 if (tem == const0_rtx)
3519 return CONST0_RTX (mode);
3520 #ifdef FLOAT_STORE_FLAG_VALUE
3522 REAL_VALUE_TYPE val;
3523 val = FLOAT_STORE_FLAG_VALUE (mode);
3524 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3526 #else
3527 return NULL_RTX;
3528 #endif
3530 if (VECTOR_MODE_P (mode))
3532 if (tem == const0_rtx)
3533 return CONST0_RTX (mode);
3534 #ifdef VECTOR_STORE_FLAG_VALUE
3536 int i, units;
3537 rtvec v;
3539 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3540 if (val == NULL_RTX)
3541 return NULL_RTX;
3542 if (val == const1_rtx)
3543 return CONST1_RTX (mode);
3545 units = GET_MODE_NUNITS (mode);
3546 v = rtvec_alloc (units);
3547 for (i = 0; i < units; i++)
3548 RTVEC_ELT (v, i) = val;
3549 return gen_rtx_raw_CONST_VECTOR (mode, v);
3551 #else
3552 return NULL_RTX;
3553 #endif
3556 return tem;
3559 /* For the following tests, ensure const0_rtx is op1. */
3560 if (swap_commutative_operands_p (op0, op1)
3561 || (op0 == const0_rtx && op1 != const0_rtx))
3562 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3564 /* If op0 is a compare, extract the comparison arguments from it. */
3565 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3566 return simplify_relational_operation (code, mode, VOIDmode,
3567 XEXP (op0, 0), XEXP (op0, 1));
3569 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3570 || CC0_P (op0))
3571 return NULL_RTX;
3573 trueop0 = avoid_constant_pool_reference (op0);
3574 trueop1 = avoid_constant_pool_reference (op1);
3575 return simplify_relational_operation_1 (code, mode, cmp_mode,
3576 trueop0, trueop1);
3579 /* This part of simplify_relational_operation is only used when CMP_MODE
3580 is not in class MODE_CC (i.e. it is a real comparison).
 3582 MODE is the mode of the result, while CMP_MODE specifies the mode in
 3583 which the comparison is done, so it is the mode of the operands. */
3585 static rtx
3586 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3587 enum machine_mode cmp_mode, rtx op0, rtx op1)
3589 enum rtx_code op0code = GET_CODE (op0);
3591 if (GET_CODE (op1) == CONST_INT)
3593 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3595 /* If op0 is a comparison, extract the comparison arguments
3596 from it. */
3597 if (code == NE)
3599 if (GET_MODE (op0) == mode)
3600 return simplify_rtx (op0);
3601 else
3602 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3603 XEXP (op0, 0), XEXP (op0, 1));
3605 else if (code == EQ)
3607 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3608 if (new_code != UNKNOWN)
3609 return simplify_gen_relational (new_code, mode, VOIDmode,
3610 XEXP (op0, 0), XEXP (op0, 1));
3615 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3616 if ((code == EQ || code == NE)
3617 && (op0code == PLUS || op0code == MINUS)
3618 && CONSTANT_P (op1)
3619 && CONSTANT_P (XEXP (op0, 1))
3620 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3622 rtx x = XEXP (op0, 0);
3623 rtx c = XEXP (op0, 1);
3625 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3626 cmp_mode, op1, c);
3627 return simplify_gen_relational (code, mode, cmp_mode, x, c);
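/* For example, (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)), and (ne (minus x (const_int 4)) (const_int 10))
   becomes (ne x (const_int 14)).  */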
 3630 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3631 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3632 if (code == NE
3633 && op1 == const0_rtx
3634 && GET_MODE_CLASS (mode) == MODE_INT
3635 && cmp_mode != VOIDmode
3636 /* ??? Work-around BImode bugs in the ia64 backend. */
3637 && mode != BImode
3638 && cmp_mode != BImode
3639 && nonzero_bits (op0, cmp_mode) == 1
3640 && STORE_FLAG_VALUE == 1)
3641 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3642 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3643 : lowpart_subreg (mode, op0, cmp_mode);
3645 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3646 if ((code == EQ || code == NE)
3647 && op1 == const0_rtx
3648 && op0code == XOR)
3649 return simplify_gen_relational (code, mode, cmp_mode,
3650 XEXP (op0, 0), XEXP (op0, 1));
3652 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3653 if ((code == EQ || code == NE)
3654 && op0code == XOR
3655 && rtx_equal_p (XEXP (op0, 0), op1)
3656 && !side_effects_p (XEXP (op0, 0)))
3657 return simplify_gen_relational (code, mode, cmp_mode,
3658 XEXP (op0, 1), const0_rtx);
3660 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3661 if ((code == EQ || code == NE)
3662 && op0code == XOR
3663 && rtx_equal_p (XEXP (op0, 1), op1)
3664 && !side_effects_p (XEXP (op0, 1)))
3665 return simplify_gen_relational (code, mode, cmp_mode,
3666 XEXP (op0, 0), const0_rtx);
3668 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3669 if ((code == EQ || code == NE)
3670 && op0code == XOR
3671 && (GET_CODE (op1) == CONST_INT
3672 || GET_CODE (op1) == CONST_DOUBLE)
3673 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3674 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3675 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3676 simplify_gen_binary (XOR, cmp_mode,
3677 XEXP (op0, 1), op1));
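/* E.g. (eq (xor x (const_int 5)) (const_int 12)) simplifies to
   (eq x (const_int 9)), since 5 ^ 12 == 9.  */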
3679 return NULL_RTX;
3682 /* Check if the given comparison (done in the given MODE) is actually a
3683 tautology or a contradiction.
3684 If no simplification is possible, this function returns zero.
3685 Otherwise, it returns either const_true_rtx or const0_rtx. */
3688 simplify_const_relational_operation (enum rtx_code code,
3689 enum machine_mode mode,
3690 rtx op0, rtx op1)
3692 int equal, op0lt, op0ltu, op1lt, op1ltu;
3693 rtx tem;
3694 rtx trueop0;
3695 rtx trueop1;
3697 gcc_assert (mode != VOIDmode
3698 || (GET_MODE (op0) == VOIDmode
3699 && GET_MODE (op1) == VOIDmode));
3701 /* If op0 is a compare, extract the comparison arguments from it. */
3702 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3704 op1 = XEXP (op0, 1);
3705 op0 = XEXP (op0, 0);
3707 if (GET_MODE (op0) != VOIDmode)
3708 mode = GET_MODE (op0);
3709 else if (GET_MODE (op1) != VOIDmode)
3710 mode = GET_MODE (op1);
3711 else
3712 return 0;
3715 /* We can't simplify MODE_CC values since we don't know what the
3716 actual comparison is. */
3717 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3718 return 0;
3720 /* Make sure the constant is second. */
3721 if (swap_commutative_operands_p (op0, op1))
3723 tem = op0, op0 = op1, op1 = tem;
3724 code = swap_condition (code);
3727 trueop0 = avoid_constant_pool_reference (op0);
3728 trueop1 = avoid_constant_pool_reference (op1);
3730 /* For integer comparisons of A and B maybe we can simplify A - B and can
3731 then simplify a comparison of that with zero. If A and B are both either
3732 a register or a CONST_INT, this can't help; testing for these cases will
3733 prevent infinite recursion here and speed things up.
3735 We can only do this for EQ and NE comparisons as otherwise we may
3736 lose or introduce overflow which we cannot disregard as undefined as
3737 we do not know the signedness of the operation on either the left or
3738 the right hand side of the comparison. */
3740 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3741 && (code == EQ || code == NE)
3742 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3743 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3744 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3745 /* We cannot do this if tem is a nonzero address. */
3746 && ! nonzero_address_p (tem))
3747 return simplify_const_relational_operation (signed_condition (code),
3748 mode, tem, const0_rtx);
3750 if (flag_unsafe_math_optimizations && code == ORDERED)
3751 return const_true_rtx;
3753 if (flag_unsafe_math_optimizations && code == UNORDERED)
3754 return const0_rtx;
3756 /* For modes without NaNs, if the two operands are equal, we know the
3757 result except if they have side-effects. */
3758 if (! HONOR_NANS (GET_MODE (trueop0))
3759 && rtx_equal_p (trueop0, trueop1)
3760 && ! side_effects_p (trueop0))
3761 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3763 /* If the operands are floating-point constants, see if we can fold
3764 the result. */
3765 else if (GET_CODE (trueop0) == CONST_DOUBLE
3766 && GET_CODE (trueop1) == CONST_DOUBLE
3767 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3769 REAL_VALUE_TYPE d0, d1;
3771 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3772 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3774 /* Comparisons are unordered iff at least one of the values is NaN. */
3775 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3776 switch (code)
3778 case UNEQ:
3779 case UNLT:
3780 case UNGT:
3781 case UNLE:
3782 case UNGE:
3783 case NE:
3784 case UNORDERED:
3785 return const_true_rtx;
3786 case EQ:
3787 case LT:
3788 case GT:
3789 case LE:
3790 case GE:
3791 case LTGT:
3792 case ORDERED:
3793 return const0_rtx;
3794 default:
3795 return 0;
3798 equal = REAL_VALUES_EQUAL (d0, d1);
3799 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3800 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3803 /* Otherwise, see if the operands are both integers. */
3804 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3805 && (GET_CODE (trueop0) == CONST_DOUBLE
3806 || GET_CODE (trueop0) == CONST_INT)
3807 && (GET_CODE (trueop1) == CONST_DOUBLE
3808 || GET_CODE (trueop1) == CONST_INT))
3810 int width = GET_MODE_BITSIZE (mode);
3811 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3812 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3814 /* Get the two words comprising each integer constant. */
3815 if (GET_CODE (trueop0) == CONST_DOUBLE)
3817 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3818 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3820 else
3822 l0u = l0s = INTVAL (trueop0);
3823 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3826 if (GET_CODE (trueop1) == CONST_DOUBLE)
3828 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3829 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3831 else
3833 l1u = l1s = INTVAL (trueop1);
3834 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3837 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3838 we have to sign or zero-extend the values. */
3839 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3841 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3842 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3844 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3845 l0s |= ((HOST_WIDE_INT) (-1) << width);
3847 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3848 l1s |= ((HOST_WIDE_INT) (-1) << width);
3850 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3851 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3853 equal = (h0u == h1u && l0u == l1u);
3854 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3855 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3856 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3857 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3860 /* Otherwise, there are some code-specific tests we can make. */
3861 else
3863 /* Optimize comparisons with upper and lower bounds. */
3864 if (SCALAR_INT_MODE_P (mode)
3865 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3867 rtx mmin, mmax;
3868 int sign;
3870 if (code == GEU
3871 || code == LEU
3872 || code == GTU
3873 || code == LTU)
3874 sign = 0;
3875 else
3876 sign = 1;
3878 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3880 tem = NULL_RTX;
3881 switch (code)
3883 case GEU:
3884 case GE:
3885 /* x >= min is always true. */
3886 if (rtx_equal_p (trueop1, mmin))
3887 tem = const_true_rtx;
3888 else
3889 break;
3891 case LEU:
3892 case LE:
3893 /* x <= max is always true. */
3894 if (rtx_equal_p (trueop1, mmax))
3895 tem = const_true_rtx;
3896 break;
3898 case GTU:
3899 case GT:
3900 /* x > max is always false. */
3901 if (rtx_equal_p (trueop1, mmax))
3902 tem = const0_rtx;
3903 break;
3905 case LTU:
3906 case LT:
3907 /* x < min is always false. */
3908 if (rtx_equal_p (trueop1, mmin))
3909 tem = const0_rtx;
3910 break;
3912 default:
3913 break;
3915 if (tem == const0_rtx
3916 || tem == const_true_rtx)
3917 return tem;
3920 switch (code)
3922 case EQ:
3923 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3924 return const0_rtx;
3925 break;
3927 case NE:
3928 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3929 return const_true_rtx;
3930 break;
3932 case LT:
3933 /* Optimize abs(x) < 0.0. */
3934 if (trueop1 == CONST0_RTX (mode)
3935 && !HONOR_SNANS (mode)
3936 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3938 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3939 : trueop0;
3940 if (GET_CODE (tem) == ABS)
3941 return const0_rtx;
3943 break;
3945 case GE:
3946 /* Optimize abs(x) >= 0.0. */
3947 if (trueop1 == CONST0_RTX (mode)
3948 && !HONOR_NANS (mode)
3949 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3951 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3952 : trueop0;
3953 if (GET_CODE (tem) == ABS)
3954 return const_true_rtx;
3956 break;
3958 case UNGE:
3959 /* Optimize ! (abs(x) < 0.0). */
3960 if (trueop1 == CONST0_RTX (mode))
3962 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3963 : trueop0;
3964 if (GET_CODE (tem) == ABS)
3965 return const_true_rtx;
3967 break;
3969 default:
3970 break;
3973 return 0;
3976 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3977 as appropriate. */
3978 switch (code)
3980 case EQ:
3981 case UNEQ:
3982 return equal ? const_true_rtx : const0_rtx;
3983 case NE:
3984 case LTGT:
3985 return ! equal ? const_true_rtx : const0_rtx;
3986 case LT:
3987 case UNLT:
3988 return op0lt ? const_true_rtx : const0_rtx;
3989 case GT:
3990 case UNGT:
3991 return op1lt ? const_true_rtx : const0_rtx;
3992 case LTU:
3993 return op0ltu ? const_true_rtx : const0_rtx;
3994 case GTU:
3995 return op1ltu ? const_true_rtx : const0_rtx;
3996 case LE:
3997 case UNLE:
3998 return equal || op0lt ? const_true_rtx : const0_rtx;
3999 case GE:
4000 case UNGE:
4001 return equal || op1lt ? const_true_rtx : const0_rtx;
4002 case LEU:
4003 return equal || op0ltu ? const_true_rtx : const0_rtx;
4004 case GEU:
4005 return equal || op1ltu ? const_true_rtx : const0_rtx;
4006 case ORDERED:
4007 return const_true_rtx;
4008 case UNORDERED:
4009 return const0_rtx;
4010 default:
4011 gcc_unreachable ();
4015 /* Simplify CODE, an operation with result mode MODE and three operands,
4016 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4017 a constant. Return 0 if no simplification is possible. */
4020 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4021 enum machine_mode op0_mode, rtx op0, rtx op1,
4022 rtx op2)
4024 unsigned int width = GET_MODE_BITSIZE (mode);
4026 /* VOIDmode means "infinite" precision. */
4027 if (width == 0)
4028 width = HOST_BITS_PER_WIDE_INT;
4030 switch (code)
4032 case SIGN_EXTRACT:
4033 case ZERO_EXTRACT:
4034 if (GET_CODE (op0) == CONST_INT
4035 && GET_CODE (op1) == CONST_INT
4036 && GET_CODE (op2) == CONST_INT
4037 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4038 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4040 /* Extracting a bit-field from a constant. */
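/* Illustrative example, assuming !BITS_BIG_ENDIAN: (zero_extract:SI (const_int 0x76543210) (const_int 8) (const_int 4)) extracts bits 4..11 and folds to (const_int 0x21).  */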
4041 HOST_WIDE_INT val = INTVAL (op0);
4043 if (BITS_BIG_ENDIAN)
4044 val >>= (GET_MODE_BITSIZE (op0_mode)
4045 - INTVAL (op2) - INTVAL (op1));
4046 else
4047 val >>= INTVAL (op2);
4049 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4051 /* First zero-extend. */
4052 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4053 /* If desired, propagate sign bit. */
4054 if (code == SIGN_EXTRACT
4055 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4056 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4059 /* Clear the bits that don't belong in our mode,
4060 unless they and our sign bit are all one.
4061 So we get either a reasonable negative value or a reasonable
4062 unsigned value for this mode. */
4063 if (width < HOST_BITS_PER_WIDE_INT
4064 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4065 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4066 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4068 return gen_int_mode (val, mode);
4070 break;
4072 case IF_THEN_ELSE:
4073 if (GET_CODE (op0) == CONST_INT)
4074 return op0 != const0_rtx ? op1 : op2;
4076 /* Convert c ? a : a into "a". */
4077 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4078 return op1;
4080 /* Convert a != b ? a : b into "a". */
4081 if (GET_CODE (op0) == NE
4082 && ! side_effects_p (op0)
4083 && ! HONOR_NANS (mode)
4084 && ! HONOR_SIGNED_ZEROS (mode)
4085 && ((rtx_equal_p (XEXP (op0, 0), op1)
4086 && rtx_equal_p (XEXP (op0, 1), op2))
4087 || (rtx_equal_p (XEXP (op0, 0), op2)
4088 && rtx_equal_p (XEXP (op0, 1), op1))))
4089 return op1;
4091 /* Convert a == b ? a : b into "b". */
4092 if (GET_CODE (op0) == EQ
4093 && ! side_effects_p (op0)
4094 && ! HONOR_NANS (mode)
4095 && ! HONOR_SIGNED_ZEROS (mode)
4096 && ((rtx_equal_p (XEXP (op0, 0), op1)
4097 && rtx_equal_p (XEXP (op0, 1), op2))
4098 || (rtx_equal_p (XEXP (op0, 0), op2)
4099 && rtx_equal_p (XEXP (op0, 1), op1))))
4100 return op2;
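/* The HONOR_SIGNED_ZEROS guard in the two transformations above matters because +0.0 and -0.0 compare equal yet are not interchangeable, so "a == b" alone does not justify substituting one operand for the other.  */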
4102 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4104 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4105 ? GET_MODE (XEXP (op0, 1))
4106 : GET_MODE (XEXP (op0, 0)));
4107 rtx temp;
4110 /* Look for constant values of op1 and op2 that let us replace the IF_THEN_ELSE with the comparison itself, possibly reversed. */
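/* For instance, on a target where STORE_FLAG_VALUE is 1, (if_then_else (lt X Y) (const_int 1) (const_int 0)) collapses to the comparison (lt X Y) itself, while swapped arms yield the reversed comparison.  */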
4110 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4112 HOST_WIDE_INT t = INTVAL (op1);
4113 HOST_WIDE_INT f = INTVAL (op2);
4115 if (t == STORE_FLAG_VALUE && f == 0)
4116 code = GET_CODE (op0);
4117 else if (t == 0 && f == STORE_FLAG_VALUE)
4119 enum rtx_code tmp;
4120 tmp = reversed_comparison_code (op0, NULL_RTX);
4121 if (tmp == UNKNOWN)
4122 break;
4123 code = tmp;
4125 else
4126 break;
4128 return simplify_gen_relational (code, mode, cmp_mode,
4129 XEXP (op0, 0), XEXP (op0, 1));
4132 if (cmp_mode == VOIDmode)
4133 cmp_mode = op0_mode;
4134 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4135 cmp_mode, XEXP (op0, 0),
4136 XEXP (op0, 1));
4138 /* See if any simplifications were possible. */
4139 if (temp)
4141 if (GET_CODE (temp) == CONST_INT)
4142 return temp == const0_rtx ? op2 : op1;
4143 else if (temp)
4144 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4147 break;
4149 case VEC_MERGE:
4150 gcc_assert (GET_MODE (op0) == mode);
4151 gcc_assert (GET_MODE (op1) == mode);
4152 gcc_assert (VECTOR_MODE_P (mode));
4153 op2 = avoid_constant_pool_reference (op2);
4154 if (GET_CODE (op2) == CONST_INT)
4156 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4157 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4158 int mask = (1 << n_elts) - 1;
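/* Bit I of op2 selects element I from op0 when set and from op1 when clear, so a zero mask yields op1 and an all-ones mask yields op0.  */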
4160 if (!(INTVAL (op2) & mask))
4161 return op1;
4162 if ((INTVAL (op2) & mask) == mask)
4163 return op0;
4165 op0 = avoid_constant_pool_reference (op0);
4166 op1 = avoid_constant_pool_reference (op1);
4167 if (GET_CODE (op0) == CONST_VECTOR
4168 && GET_CODE (op1) == CONST_VECTOR)
4170 rtvec v = rtvec_alloc (n_elts);
4171 unsigned int i;
4173 for (i = 0; i < n_elts; i++)
4174 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4175 ? CONST_VECTOR_ELT (op0, i)
4176 : CONST_VECTOR_ELT (op1, i));
4177 return gen_rtx_CONST_VECTOR (mode, v);
4180 break;
4182 default:
4183 gcc_unreachable ();
4186 return 0;
4189 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4190 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4192 Works by unpacking OP into a collection of 8-bit values
4193 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4194 and then repacking them again for OUTERMODE. */
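/* Illustrative example: on a fully little-endian target, (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE selects the two low-order bytes and repacks them as (const_int 0x5678).  */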
4196 static rtx
4197 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4198 enum machine_mode innermode, unsigned int byte)
4200 /* We support up to 512-bit values (for V8DFmode). */
4201 enum {
4202 max_bitsize = 512,
4203 value_bit = 8,
4204 value_mask = (1 << value_bit) - 1
4206 unsigned char value[max_bitsize / value_bit];
4207 int value_start;
4208 int i;
4209 int elem;
4211 int num_elem;
4212 rtx * elems;
4213 int elem_bitsize;
4214 rtx result_s;
4215 rtvec result_v = NULL;
4216 enum mode_class outer_class;
4217 enum machine_mode outer_submode;
4219 /* Some ports misuse CCmode. */
4220 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4221 return op;
4223 /* We have no way to represent a complex constant at the rtl level. */
4224 if (COMPLEX_MODE_P (outermode))
4225 return NULL_RTX;
4227 /* Unpack the value. */
4229 if (GET_CODE (op) == CONST_VECTOR)
4231 num_elem = CONST_VECTOR_NUNITS (op);
4232 elems = &CONST_VECTOR_ELT (op, 0);
4233 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4235 else
4237 num_elem = 1;
4238 elems = &op;
4239 elem_bitsize = max_bitsize;
4241 /* If this asserts, it is too complicated; reducing value_bit may help. */
4242 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4243 /* I don't know how to handle endianness of sub-units. */
4244 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4246 for (elem = 0; elem < num_elem; elem++)
4248 unsigned char * vp;
4249 rtx el = elems[elem];
4251 /* Vectors are kept in target memory order. (This is probably
4252 a mistake.) */
4254 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4255 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4256 / BITS_PER_UNIT);
4257 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4258 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4259 unsigned bytele = (subword_byte % UNITS_PER_WORD
4260 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4261 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4264 switch (GET_CODE (el))
4266 case CONST_INT:
4267 for (i = 0;
4268 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4269 i += value_bit)
4270 *vp++ = INTVAL (el) >> i;
4271 /* CONST_INTs are always logically sign-extended. */
4272 for (; i < elem_bitsize; i += value_bit)
4273 *vp++ = INTVAL (el) < 0 ? -1 : 0;
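/* Thus (const_int -1) unpacks as 0xff in every 8-bit value chunk, however wide the element is.  */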
4274 break;
4276 case CONST_DOUBLE:
4277 if (GET_MODE (el) == VOIDmode)
4279 /* If this triggers, someone should have generated a
4280 CONST_INT instead. */
4281 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4283 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4284 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4285 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4287 *vp++
4288 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4289 i += value_bit;
4291 /* It shouldn't matter what's done here, so fill it with
4292 zero. */
4293 for (; i < elem_bitsize; i += value_bit)
4294 *vp++ = 0;
4296 else
4298 long tmp[max_bitsize / 32];
4299 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4301 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4302 gcc_assert (bitsize <= elem_bitsize);
4303 gcc_assert (bitsize % value_bit == 0);
4305 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4306 GET_MODE (el));
4308 /* real_to_target produces its result in words affected by
4309 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4310 and use WORDS_BIG_ENDIAN instead; see the documentation
4311 of SUBREG in rtl.texi. */
4312 for (i = 0; i < bitsize; i += value_bit)
4314 int ibase;
4315 if (WORDS_BIG_ENDIAN)
4316 ibase = bitsize - 1 - i;
4317 else
4318 ibase = i;
4319 *vp++ = tmp[ibase / 32] >> i % 32;
4322 /* It shouldn't matter what's done here, so fill it with
4323 zero. */
4324 for (; i < elem_bitsize; i += value_bit)
4325 *vp++ = 0;
4327 break;
4329 default:
4330 gcc_unreachable ();
4334 /* Now, pick the right byte to start with. */
4335 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4336 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4337 will already have offset 0. */
4338 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4340 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4341 - byte);
4342 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4343 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4344 byte = (subword_byte % UNITS_PER_WORD
4345 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
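/* For instance, on a fully big-endian target with 4-byte words, (subreg:SI ... 0) of a DImode value names the most significant half, so BYTE 0 is renumbered to 4 here and then indexes the high-order bytes of the little-endian VALUE array.  */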
4348 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4349 so if it's become negative it will instead be very large.) */
4350 gcc_assert (byte < GET_MODE_SIZE (innermode));
4352 /* Convert from bytes to chunks of size value_bit. */
4353 value_start = byte * (BITS_PER_UNIT / value_bit);
4355 /* Re-pack the value. */
4357 if (VECTOR_MODE_P (outermode))
4359 num_elem = GET_MODE_NUNITS (outermode);
4360 result_v = rtvec_alloc (num_elem);
4361 elems = &RTVEC_ELT (result_v, 0);
4362 outer_submode = GET_MODE_INNER (outermode);
4364 else
4366 num_elem = 1;
4367 elems = &result_s;
4368 outer_submode = outermode;
4371 outer_class = GET_MODE_CLASS (outer_submode);
4372 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4374 gcc_assert (elem_bitsize % value_bit == 0);
4375 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4377 for (elem = 0; elem < num_elem; elem++)
4379 unsigned char *vp;
4381 /* Vectors are stored in target memory order. (This is probably
4382 a mistake.) */
4384 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4385 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4386 / BITS_PER_UNIT);
4387 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4388 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4389 unsigned bytele = (subword_byte % UNITS_PER_WORD
4390 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4391 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4394 switch (outer_class)
4396 case MODE_INT:
4397 case MODE_PARTIAL_INT:
4399 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4401 for (i = 0;
4402 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4403 i += value_bit)
4404 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4405 for (; i < elem_bitsize; i += value_bit)
4406 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4407 << (i - HOST_BITS_PER_WIDE_INT));
4409 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4410 know why. */
4411 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4412 elems[elem] = gen_int_mode (lo, outer_submode);
4413 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4414 elems[elem] = immed_double_const (lo, hi, outer_submode);
4415 else
4416 return NULL_RTX;
4418 break;
4420 case MODE_FLOAT:
4421 case MODE_DECIMAL_FLOAT:
4423 REAL_VALUE_TYPE r;
4424 long tmp[max_bitsize / 32];
4426 /* real_from_target wants its input in words affected by
4427 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4428 and use WORDS_BIG_ENDIAN instead; see the documentation
4429 of SUBREG in rtl.texi. */
4430 for (i = 0; i < max_bitsize / 32; i++)
4431 tmp[i] = 0;
4432 for (i = 0; i < elem_bitsize; i += value_bit)
4434 int ibase;
4435 if (WORDS_BIG_ENDIAN)
4436 ibase = elem_bitsize - 1 - i;
4437 else
4438 ibase = i;
4439 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4442 real_from_target (&r, tmp, outer_submode);
4443 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4445 break;
4447 default:
4448 gcc_unreachable ();
4451 if (VECTOR_MODE_P (outermode))
4452 return gen_rtx_CONST_VECTOR (outermode, result_v);
4453 else
4454 return result_s;
4457 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4458 Return 0 if no simplifications are possible. */
4460 simplify_subreg (enum machine_mode outermode, rtx op,
4461 enum machine_mode innermode, unsigned int byte)
4463 /* Little bit of sanity checking. */
4464 gcc_assert (innermode != VOIDmode);
4465 gcc_assert (outermode != VOIDmode);
4466 gcc_assert (innermode != BLKmode);
4467 gcc_assert (outermode != BLKmode);
4469 gcc_assert (GET_MODE (op) == innermode
4470 || GET_MODE (op) == VOIDmode);
4472 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4473 gcc_assert (byte < GET_MODE_SIZE (innermode));
4475 if (outermode == innermode && !byte)
4476 return op;
4478 if (GET_CODE (op) == CONST_INT
4479 || GET_CODE (op) == CONST_DOUBLE
4480 || GET_CODE (op) == CONST_VECTOR)
4481 return simplify_immed_subreg (outermode, op, innermode, byte);
4483 /* Changing mode twice with SUBREG => just change it once,
4484 or not at all if changing back to op's starting mode. */
4485 if (GET_CODE (op) == SUBREG)
4487 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4488 int final_offset = byte + SUBREG_BYTE (op);
4489 rtx newx;
4491 if (outermode == innermostmode
4492 && byte == 0 && SUBREG_BYTE (op) == 0)
4493 return SUBREG_REG (op);
4495 /* The SUBREG_BYTE represents the offset, as if the value were stored
4496 in memory. An irritating exception is the paradoxical subreg, where
4497 we define SUBREG_BYTE to be 0. On big-endian machines this value
4498 would otherwise be negative. For a moment, undo this exception. */
4499 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4501 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4502 if (WORDS_BIG_ENDIAN)
4503 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4504 if (BYTES_BIG_ENDIAN)
4505 final_offset += difference % UNITS_PER_WORD;
4507 if (SUBREG_BYTE (op) == 0
4508 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4510 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4511 if (WORDS_BIG_ENDIAN)
4512 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4513 if (BYTES_BIG_ENDIAN)
4514 final_offset += difference % UNITS_PER_WORD;
4517 /* See whether resulting subreg will be paradoxical. */
4518 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4520 /* In nonparadoxical subregs we can't handle negative offsets. */
4521 if (final_offset < 0)
4522 return NULL_RTX;
4523 /* Bail out in case resulting subreg would be incorrect. */
4524 if (final_offset % GET_MODE_SIZE (outermode)
4525 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4526 return NULL_RTX;
4528 else
4530 int offset = 0;
4531 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4533 /* For a paradoxical subreg, see if we are still looking at the lower
4534 part. If so, our SUBREG_BYTE will be 0. */
4535 if (WORDS_BIG_ENDIAN)
4536 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4537 if (BYTES_BIG_ENDIAN)
4538 offset += difference % UNITS_PER_WORD;
4539 if (offset == final_offset)
4540 final_offset = 0;
4541 else
4542 return NULL_RTX;
4545 /* Recurse for further possible simplifications. */
4546 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4547 final_offset);
4548 if (newx)
4549 return newx;
4550 if (validate_subreg (outermode, innermostmode,
4551 SUBREG_REG (op), final_offset))
4552 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4553 return NULL_RTX;
4556 /* Merge implicit and explicit truncations. */
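/* For example, (subreg:QI (truncate:HI (X:SI)) 0) becomes (truncate:QI (X:SI)) when byte 0 is the low-part offset, as on a little-endian target.  */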
4558 if (GET_CODE (op) == TRUNCATE
4559 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4560 && subreg_lowpart_offset (outermode, innermode) == byte)
4561 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4562 GET_MODE (XEXP (op, 0)));
4564 /* SUBREG of a hard register => just change the register number
4565 and/or mode. If the hard register is not valid in that mode,
4566 suppress this simplification. If the hard register is the stack,
4567 frame, or argument pointer, leave this as a SUBREG. */
4569 if (REG_P (op)
4570 && REGNO (op) < FIRST_PSEUDO_REGISTER
4571 #ifdef CANNOT_CHANGE_MODE_CLASS
4572 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4573 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4574 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4575 #endif
4576 && ((reload_completed && !frame_pointer_needed)
4577 || (REGNO (op) != FRAME_POINTER_REGNUM
4578 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4579 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4580 #endif
4582 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4583 && REGNO (op) != ARG_POINTER_REGNUM
4584 #endif
4585 && REGNO (op) != STACK_POINTER_REGNUM
4586 && subreg_offset_representable_p (REGNO (op), innermode,
4587 byte, outermode))
4589 unsigned int regno = REGNO (op);
4590 unsigned int final_regno
4591 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4593 /* ??? We do allow it if the current REG is not valid for
4594 its mode. This is a kludge to work around how float/complex
4595 arguments are passed on 32-bit SPARC and should be fixed. */
4596 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4597 || ! HARD_REGNO_MODE_OK (regno, innermode))
4599 rtx x;
4600 int final_offset = byte;
4602 /* Adjust offset for paradoxical subregs. */
4603 if (byte == 0
4604 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4606 int difference = (GET_MODE_SIZE (innermode)
4607 - GET_MODE_SIZE (outermode));
4608 if (WORDS_BIG_ENDIAN)
4609 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4610 if (BYTES_BIG_ENDIAN)
4611 final_offset += difference % UNITS_PER_WORD;
4614 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4616 /* Propagate the original regno. We don't have any way to specify
4617 the offset inside the original regno, so do so only for the lowpart.
4618 The information is used only by alias analysis, which cannot
4619 grok partial registers anyway. */
4621 if (subreg_lowpart_offset (outermode, innermode) == byte)
4622 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4623 return x;
4627 /* If we have a SUBREG of a register that we are replacing and we are
4628 replacing it with a MEM, make a new MEM and try replacing the
4629 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4630 or if we would be widening it. */
4632 if (MEM_P (op)
4633 && ! mode_dependent_address_p (XEXP (op, 0))
4634 /* Allow splitting of volatile memory references in case we don't
4635 have an instruction to move the whole thing. */
4636 && (! MEM_VOLATILE_P (op)
4637 || ! have_insn_for (SET, innermode))
4638 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4639 return adjust_address_nv (op, outermode, byte);
4641 /* Handle complex values represented as CONCAT
4642 of real and imaginary part. */
4643 if (GET_CODE (op) == CONCAT)
4645 unsigned int inner_size, final_offset;
4646 rtx part, res;
4648 inner_size = GET_MODE_UNIT_SIZE (innermode);
4649 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4650 final_offset = byte % inner_size;
4651 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4652 return NULL_RTX;
4654 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4655 if (res)
4656 return res;
4657 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4658 return gen_rtx_SUBREG (outermode, part, final_offset);
4659 return NULL_RTX;
4662 /* Optimize SUBREG truncations of zero and sign extended values. */
4663 if ((GET_CODE (op) == ZERO_EXTEND
4664 || GET_CODE (op) == SIGN_EXTEND)
4665 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4667 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4669 /* If we're requesting the lowpart of a zero or sign extension,
4670 there are three possibilities. If the outermode is the same
4671 as the origmode, we can omit both the extension and the subreg.
4672 If the outermode is not larger than the origmode, we can apply
4673 the truncation without the extension. Finally, if the outermode
4674 is larger than the origmode, but both are integer modes, we
4675 can just extend to the appropriate mode. */
4676 if (bitpos == 0)
4678 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4679 if (outermode == origmode)
4680 return XEXP (op, 0);
4681 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4682 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4683 subreg_lowpart_offset (outermode,
4684 origmode));
4685 if (SCALAR_INT_MODE_P (outermode))
4686 return simplify_gen_unary (GET_CODE (op), outermode,
4687 XEXP (op, 0), origmode);
4690 /* A SUBREG resulting from a zero extension may fold to zero if
4691 it extracts higher bits than the ZERO_EXTEND's source provides. */
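/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (X:SI)) 4) reads only bits produced by the extension itself and folds to (const_int 0).  */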
4692 if (GET_CODE (op) == ZERO_EXTEND
4693 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4694 return CONST0_RTX (outermode);
4697 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4698 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4699 the outer subreg is effectively a truncation to the original mode. */
4700 if ((GET_CODE (op) == LSHIFTRT
4701 || GET_CODE (op) == ASHIFTRT)
4702 && SCALAR_INT_MODE_P (outermode)
4703 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4704 to avoid the possibility that an outer LSHIFTRT shifts by more
4705 than the sign extension's sign_bit_copies and introduces zeros
4706 into the high bits of the result. */
4707 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4708 && GET_CODE (XEXP (op, 1)) == CONST_INT
4709 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4711 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4712 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4713 return simplify_gen_binary (ASHIFTRT, outermode,
4714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4716 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4717 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4718 the outer subreg is effectively a truncation to the original mode. */
4719 if ((GET_CODE (op) == LSHIFTRT
4720 || GET_CODE (op) == ASHIFTRT)
4721 && SCALAR_INT_MODE_P (outermode)
4722 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4723 && GET_CODE (XEXP (op, 1)) == CONST_INT
4724 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4725 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4726 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4727 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4728 return simplify_gen_binary (LSHIFTRT, outermode,
4729 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4731 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4732 (ashift:QI (x:QI) C), where C is a suitable small constant and
4733 the outer subreg is effectively a truncation to the original mode. */
4734 if (GET_CODE (op) == ASHIFT
4735 && SCALAR_INT_MODE_P (outermode)
4736 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4737 && GET_CODE (XEXP (op, 1)) == CONST_INT
4738 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4739 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4740 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4741 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4742 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4743 return simplify_gen_binary (ASHIFT, outermode,
4744 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4746 return NULL_RTX;
4749 /* Make a SUBREG operation or equivalent if it folds. */
4752 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4753 enum machine_mode innermode, unsigned int byte)
4755 rtx newx;
4757 newx = simplify_subreg (outermode, op, innermode, byte);
4758 if (newx)
4759 return newx;
4761 if (GET_CODE (op) == SUBREG
4762 || GET_CODE (op) == CONCAT
4763 || GET_MODE (op) == VOIDmode)
4764 return NULL_RTX;
4766 if (validate_subreg (outermode, innermode, op, byte))
4767 return gen_rtx_SUBREG (outermode, op, byte);
4769 return NULL_RTX;
4772 /* Simplify X, an rtx expression.
4774 Return the simplified expression or NULL if no simplifications
4775 were possible.
4777 This is the preferred entry point into the simplification routines;
4778 however, we still allow passes to call the more specific routines.
4780 Right now GCC has three (yes, three) major bodies of RTL simplification
4781 code that need to be unified.
4783 1. fold_rtx in cse.c. This code uses various CSE specific
4784 information to aid in RTL simplification.
4786 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4787 it uses combine specific information to aid in RTL
4788 simplification.
4790 3. The routines in this file.
4793 Long term we want to only have one body of simplification code; to
4794 get to that state I recommend the following steps:
4796 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4797 which do not depend on pass-specific state into these routines.
4799 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4800 use this routine whenever possible.
4802 3. Allow for pass dependent state to be provided to these
4803 routines and add simplifications based on the pass dependent
4804 state. Remove code from cse.c & combine.c that becomes
4805 redundant/dead.
4807 It will take time, but ultimately the compiler will be easier to
4808 maintain and improve. It's totally silly that when we add a
4809 simplification it needs to be added to 4 places (3 for RTL
4810 simplification and 1 for tree simplification). */
4813 simplify_rtx (rtx x)
4815 enum rtx_code code = GET_CODE (x);
4816 enum machine_mode mode = GET_MODE (x);
4818 switch (GET_RTX_CLASS (code))
4820 case RTX_UNARY:
4821 return simplify_unary_operation (code, mode,
4822 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4823 case RTX_COMM_ARITH:
4824 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4825 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4827 /* Fall through.... */
4829 case RTX_BIN_ARITH:
4830 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4832 case RTX_TERNARY:
4833 case RTX_BITFIELD_OPS:
4834 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4835 XEXP (x, 0), XEXP (x, 1),
4836 XEXP (x, 2));
4838 case RTX_COMPARE:
4839 case RTX_COMM_COMPARE:
4840 return simplify_relational_operation (code, mode,
4841 ((GET_MODE (XEXP (x, 0))
4842 != VOIDmode)
4843 ? GET_MODE (XEXP (x, 0))
4844 : GET_MODE (XEXP (x, 1))),
4845 XEXP (x, 0),
4846 XEXP (x, 1));
4848 case RTX_EXTRA:
4849 if (code == SUBREG)
4850 return simplify_gen_subreg (mode, SUBREG_REG (x),
4851 GET_MODE (SUBREG_REG (x)),
4852 SUBREG_BYTE (x));
4853 break;
4855 case RTX_OBJ:
4856 if (code == LO_SUM)
4858 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4859 if (GET_CODE (XEXP (x, 0)) == HIGH
4860 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4861 return XEXP (x, 1);
4863 break;
4865 default:
4866 break;
4868 return NULL;