* Mainline merge as of 2006-02-16 (@111136).
[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
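/* A minimal sketch of the (low, high) convention, assuming a 64-bit
   HOST_WIDE_INT and a hypothetical helper name: widening a single word into
   a (low, high) pair fills the high word with the macro above.

       static void
       widen_to_double_word (HOST_WIDE_INT x,
                             unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *high)
       {
         *low = x;
         *high = HWI_SIGN_EXTEND (x);
       }

   For x = -5 this yields low == 0xfffffffffffffffb and high == -1; for
   x = 5 it yields low == 5 and high == 0.  */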
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
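/* A worked example, assuming 32-bit SImode and a 64-bit HOST_WIDE_INT: the
   SImode sign bit is represented as (const_int -2147483648), whose
   sign-extended value masks down to (unsigned HOST_WIDE_INT) 1 << 31, so
   mode_signbit_p returns true for it and false for, say, const1_rtx.  */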
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
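/* A usage sketch with hypothetical operands: given some pseudo (reg:SI n),

       simplify_gen_binary (PLUS, SImode, const1_rtx, reg);

   first tries constant folding and otherwise returns
   (plus:SI (reg:SI n) (const_int 1)), with the register first and the
   constant second per the canonical commutative ordering above.  */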
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 addr = XEXP (x, 0);
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
195 else
196 return c;
199 return x;
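/* For example (a sketch with a hypothetical pool entry): if X is
   (mem:SI (symbol_ref ...)) whose constant-pool slot was created for
   (const_int 42) in SImode, the function returns (const_int 42); a MEM whose
   address is not a constant-pool SYMBOL_REF is returned unchanged.  */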
202 /* Return true if X is a MEM referencing the constant pool. */
204 bool
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
213 rtx
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
228 rtx
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 rtx tem;
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
245 rtx
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 rtx tem;
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
261 rtx
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
273 if (x == old_rtx)
274 return new_rtx;
276 switch (GET_RTX_CLASS (code))
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
330 break;
332 case RTX_OBJ:
333 if (code == MEM)
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
358 break;
360 default:
361 break;
363 return x;
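/* A usage sketch with hypothetical rtxes: replacing (reg:SI 100) by
   (const_int 4) in (plus:SI (reg:SI 100) (const_int 3)) rebuilds the PLUS
   through simplify_gen_binary, which folds it to (const_int 7).  */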
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
369 rtx
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
373 rtx trueop, tem;
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
393 rtx temp;
395 switch (code)
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
439 bother with. */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
467 rtx x;
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
471 inner_mode),
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
479 coded. */
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
491 op_mode = mode;
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 rtx tem = in2;
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
501 mode, in1, in2);
503 break;
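/* For example, the De Morgan rewrite above turns (not (ior X Y)) into
   (and (not X) (not Y)), and (not (and X (const_int 12))) into
   (ior (not X) (const_int -13)), keeping the remaining NOT first.  */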
505 case NEG:
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
508 return XEXP (op, 0);
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
538 if (temp)
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
558 is a constant). */
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
562 if (temp)
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
587 break;
589 case TRUNCATE:
590 /* We can't handle truncation to a partial integer mode here
591 because we don't know the real bitsize of the partial
592 integer mode. */
593 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
594 break;
596 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
597 if ((GET_CODE (op) == SIGN_EXTEND
598 || GET_CODE (op) == ZERO_EXTEND)
599 && GET_MODE (XEXP (op, 0)) == mode)
600 return XEXP (op, 0);
602 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
603 (OP:SI foo:SI) if OP is NEG or ABS. */
604 if ((GET_CODE (op) == ABS
605 || GET_CODE (op) == NEG)
606 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
607 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
608 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
609 return simplify_gen_unary (GET_CODE (op), mode,
610 XEXP (XEXP (op, 0), 0), mode);
612 /* (truncate:A (subreg:B (truncate:C X) 0)) is
613 (truncate:A X). */
614 if (GET_CODE (op) == SUBREG
615 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
616 && subreg_lowpart_p (op))
617 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
618 GET_MODE (XEXP (SUBREG_REG (op), 0)));
620 /* If we know that the value is already truncated, we can
621 replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
622 is nonzero for the corresponding modes. But don't do this
623 for an (LSHIFTRT (MULT ...)) since this will cause problems
624 with the umulXi3_highpart patterns. */
625 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
626 GET_MODE_BITSIZE (GET_MODE (op)))
627 && num_sign_bit_copies (op, GET_MODE (op))
628 >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
629 && ! (GET_CODE (op) == LSHIFTRT
630 && GET_CODE (XEXP (op, 0)) == MULT))
631 return rtl_hooks.gen_lowpart_no_emit (mode, op);
633 /* A truncate of a comparison can be replaced with a subreg if
634 STORE_FLAG_VALUE permits. This is like the previous test,
635 but it works even if the comparison is done in a mode larger
636 than HOST_BITS_PER_WIDE_INT. */
637 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
638 && COMPARISON_P (op)
639 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
640 return rtl_hooks.gen_lowpart_no_emit (mode, op);
641 break;
643 case FLOAT_TRUNCATE:
644 if (DECIMAL_FLOAT_MODE_P (mode))
645 break;
647 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
648 if (GET_CODE (op) == FLOAT_EXTEND
649 && GET_MODE (XEXP (op, 0)) == mode)
650 return XEXP (op, 0);
652 /* (float_truncate:SF (float_truncate:DF foo:XF))
653 = (float_truncate:SF foo:XF).
654 This may eliminate double rounding, so it is unsafe.
656 (float_truncate:SF (float_extend:XF foo:DF))
657 = (float_truncate:SF foo:DF).
659 (float_truncate:DF (float_extend:XF foo:SF))
660 = (float_extend:DF foo:SF). */
661 if ((GET_CODE (op) == FLOAT_TRUNCATE
662 && flag_unsafe_math_optimizations)
663 || GET_CODE (op) == FLOAT_EXTEND)
664 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
665 0)))
666 > GET_MODE_SIZE (mode)
667 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
668 mode,
669 XEXP (op, 0), mode);
671 /* (float_truncate (float x)) is (float x) */
672 if (GET_CODE (op) == FLOAT
673 && (flag_unsafe_math_optimizations
674 || ((unsigned)significand_size (GET_MODE (op))
675 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
676 - num_sign_bit_copies (XEXP (op, 0),
677 GET_MODE (XEXP (op, 0)))))))
678 return simplify_gen_unary (FLOAT, mode,
679 XEXP (op, 0),
680 GET_MODE (XEXP (op, 0)));
682 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
683 (OP:SF foo:SF) if OP is NEG or ABS. */
684 if ((GET_CODE (op) == ABS
685 || GET_CODE (op) == NEG)
686 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
687 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
688 return simplify_gen_unary (GET_CODE (op), mode,
689 XEXP (XEXP (op, 0), 0), mode);
691 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
692 is (float_truncate:SF x). */
693 if (GET_CODE (op) == SUBREG
694 && subreg_lowpart_p (op)
695 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
696 return SUBREG_REG (op);
697 break;
699 case FLOAT_EXTEND:
700 if (DECIMAL_FLOAT_MODE_P (mode))
701 break;
703 /* (float_extend (float_extend x)) is (float_extend x)
705 (float_extend (float x)) is (float x) assuming that double
706 rounding can't happen.
707 */
708 if (GET_CODE (op) == FLOAT_EXTEND
709 || (GET_CODE (op) == FLOAT
710 && ((unsigned)significand_size (GET_MODE (op))
711 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
712 - num_sign_bit_copies (XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)))))))
714 return simplify_gen_unary (GET_CODE (op), mode,
715 XEXP (op, 0),
716 GET_MODE (XEXP (op, 0)));
718 break;
720 case ABS:
721 /* (abs (neg <foo>)) -> (abs <foo>) */
722 if (GET_CODE (op) == NEG)
723 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
724 GET_MODE (XEXP (op, 0)));
726 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
727 do nothing. */
728 if (GET_MODE (op) == VOIDmode)
729 break;
731 /* If operand is something known to be positive, ignore the ABS. */
732 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
733 || ((GET_MODE_BITSIZE (GET_MODE (op))
734 <= HOST_BITS_PER_WIDE_INT)
735 && ((nonzero_bits (op, GET_MODE (op))
736 & ((HOST_WIDE_INT) 1
737 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
738 == 0)))
739 return op;
741 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
742 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
743 return gen_rtx_NEG (mode, op);
745 break;
747 case FFS:
748 /* (ffs (*_extend <X>)) = (ffs <X>) */
749 if (GET_CODE (op) == SIGN_EXTEND
750 || GET_CODE (op) == ZERO_EXTEND)
751 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
752 GET_MODE (XEXP (op, 0)));
753 break;
755 case POPCOUNT:
756 case PARITY:
757 /* (pop* (zero_extend <X>)) = (pop* <X>) */
758 if (GET_CODE (op) == ZERO_EXTEND)
759 return simplify_gen_unary (code, mode, XEXP (op, 0),
760 GET_MODE (XEXP (op, 0)));
761 break;
763 case FLOAT:
764 /* (float (sign_extend <X>)) = (float <X>). */
765 if (GET_CODE (op) == SIGN_EXTEND)
766 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
767 GET_MODE (XEXP (op, 0)));
768 break;
770 case SIGN_EXTEND:
771 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
772 becomes just the MINUS if its mode is MODE. This allows
773 folding switch statements on machines using casesi (such as
774 the VAX). */
775 if (GET_CODE (op) == TRUNCATE
776 && GET_MODE (XEXP (op, 0)) == mode
777 && GET_CODE (XEXP (op, 0)) == MINUS
778 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
779 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
780 return XEXP (op, 0);
782 /* Check for a sign extension of a subreg of a promoted
783 variable, where the promotion is sign-extended, and the
784 target mode is the same as the variable's promotion. */
785 if (GET_CODE (op) == SUBREG
786 && SUBREG_PROMOTED_VAR_P (op)
787 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
788 && GET_MODE (XEXP (op, 0)) == mode)
789 return XEXP (op, 0);
791 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
792 if (! POINTERS_EXTEND_UNSIGNED
793 && mode == Pmode && GET_MODE (op) == ptr_mode
794 && (CONSTANT_P (op)
795 || (GET_CODE (op) == SUBREG
796 && REG_P (SUBREG_REG (op))
797 && REG_POINTER (SUBREG_REG (op))
798 && GET_MODE (SUBREG_REG (op)) == Pmode)))
799 return convert_memory_address (Pmode, op);
800 #endif
801 break;
803 case ZERO_EXTEND:
804 /* Check for a zero extension of a subreg of a promoted
805 variable, where the promotion is zero-extended, and the
806 target mode is the same as the variable's promotion. */
807 if (GET_CODE (op) == SUBREG
808 && SUBREG_PROMOTED_VAR_P (op)
809 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
810 && GET_MODE (XEXP (op, 0)) == mode)
811 return XEXP (op, 0);
813 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
814 if (POINTERS_EXTEND_UNSIGNED > 0
815 && mode == Pmode && GET_MODE (op) == ptr_mode
816 && (CONSTANT_P (op)
817 || (GET_CODE (op) == SUBREG
818 && REG_P (SUBREG_REG (op))
819 && REG_POINTER (SUBREG_REG (op))
820 && GET_MODE (SUBREG_REG (op)) == Pmode)))
821 return convert_memory_address (Pmode, op);
822 #endif
823 break;
825 default:
826 break;
829 return 0;
832 /* Try to compute the value of a unary operation CODE whose output mode is to
833 be MODE with input operand OP whose mode was originally OP_MODE.
834 Return zero if the value cannot be computed. */
835 rtx
836 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
837 rtx op, enum machine_mode op_mode)
839 unsigned int width = GET_MODE_BITSIZE (mode);
841 if (code == VEC_DUPLICATE)
843 gcc_assert (VECTOR_MODE_P (mode));
844 if (GET_MODE (op) != VOIDmode)
846 if (!VECTOR_MODE_P (GET_MODE (op)))
847 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
848 else
849 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
850 (GET_MODE (op)));
852 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
853 || GET_CODE (op) == CONST_VECTOR)
855 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
856 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
857 rtvec v = rtvec_alloc (n_elts);
858 unsigned int i;
860 if (GET_CODE (op) != CONST_VECTOR)
861 for (i = 0; i < n_elts; i++)
862 RTVEC_ELT (v, i) = op;
863 else
865 enum machine_mode inmode = GET_MODE (op);
866 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
867 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
869 gcc_assert (in_n_elts < n_elts);
870 gcc_assert ((n_elts % in_n_elts) == 0);
871 for (i = 0; i < n_elts; i++)
872 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
874 return gen_rtx_CONST_VECTOR (mode, v);
878 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
880 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
881 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
882 enum machine_mode opmode = GET_MODE (op);
883 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
884 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
885 rtvec v = rtvec_alloc (n_elts);
886 unsigned int i;
888 gcc_assert (op_n_elts == n_elts);
889 for (i = 0; i < n_elts; i++)
891 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
892 CONST_VECTOR_ELT (op, i),
893 GET_MODE_INNER (opmode));
894 if (!x)
895 return 0;
896 RTVEC_ELT (v, i) = x;
898 return gen_rtx_CONST_VECTOR (mode, v);
901 /* The order of these tests is critical so that, for example, we don't
902 check the wrong mode (input vs. output) for a conversion operation,
903 such as FIX. At some point, this should be simplified. */
905 if (code == FLOAT && GET_MODE (op) == VOIDmode
906 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
908 HOST_WIDE_INT hv, lv;
909 REAL_VALUE_TYPE d;
911 if (GET_CODE (op) == CONST_INT)
912 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
913 else
914 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
916 REAL_VALUE_FROM_INT (d, lv, hv, mode);
917 d = real_value_truncate (mode, d);
918 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
920 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
921 && (GET_CODE (op) == CONST_DOUBLE
922 || GET_CODE (op) == CONST_INT))
924 HOST_WIDE_INT hv, lv;
925 REAL_VALUE_TYPE d;
927 if (GET_CODE (op) == CONST_INT)
928 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
929 else
930 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
932 if (op_mode == VOIDmode)
934 /* We don't know how to interpret negative-looking numbers in
935 this case, so don't try to fold those. */
936 if (hv < 0)
937 return 0;
939 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
940 ;
941 else
942 hv = 0, lv &= GET_MODE_MASK (op_mode);
944 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
945 d = real_value_truncate (mode, d);
946 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
949 if (GET_CODE (op) == CONST_INT
950 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
952 HOST_WIDE_INT arg0 = INTVAL (op);
953 HOST_WIDE_INT val;
955 switch (code)
957 case NOT:
958 val = ~ arg0;
959 break;
961 case NEG:
962 val = - arg0;
963 break;
965 case ABS:
966 val = (arg0 >= 0 ? arg0 : - arg0);
967 break;
969 case FFS:
970 /* Don't use ffs here. Instead, get low order bit and then its
971 number. If arg0 is zero, this will return 0, as desired. */
972 arg0 &= GET_MODE_MASK (mode);
973 val = exact_log2 (arg0 & (- arg0)) + 1;
974 break;
976 case CLZ:
977 arg0 &= GET_MODE_MASK (mode);
978 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
979 ;
980 else
981 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
982 break;
984 case CTZ:
985 arg0 &= GET_MODE_MASK (mode);
986 if (arg0 == 0)
988 /* Even if the value at zero is undefined, we have to come
989 up with some replacement. Seems good enough. */
990 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
991 val = GET_MODE_BITSIZE (mode);
993 else
994 val = exact_log2 (arg0 & -arg0);
995 break;
997 case POPCOUNT:
998 arg0 &= GET_MODE_MASK (mode);
999 val = 0;
1000 while (arg0)
1001 val++, arg0 &= arg0 - 1;
1002 break;
1004 case PARITY:
1005 arg0 &= GET_MODE_MASK (mode);
1006 val = 0;
1007 while (arg0)
1008 val++, arg0 &= arg0 - 1;
1009 val &= 1;
1010 break;
1012 case TRUNCATE:
1013 val = arg0;
1014 break;
1016 case ZERO_EXTEND:
1017 /* When zero-extending a CONST_INT, we need to know its
1018 original mode. */
1019 gcc_assert (op_mode != VOIDmode);
1020 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1022 /* If we were really extending the mode,
1023 we would have to distinguish between zero-extension
1024 and sign-extension. */
1025 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1026 val = arg0;
1028 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1029 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1030 else
1031 return 0;
1032 break;
1034 case SIGN_EXTEND:
1035 if (op_mode == VOIDmode)
1036 op_mode = mode;
1037 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1039 /* If we were really extending the mode,
1040 we would have to distinguish between zero-extension
1041 and sign-extension. */
1042 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1043 val = arg0;
1045 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1047 val
1048 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1049 if (val
1050 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1051 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1053 else
1054 return 0;
1055 break;
1057 case SQRT:
1058 case FLOAT_EXTEND:
1059 case FLOAT_TRUNCATE:
1060 case SS_TRUNCATE:
1061 case US_TRUNCATE:
1062 return 0;
1064 default:
1065 gcc_unreachable ();
1068 return gen_int_mode (val, mode);
1071 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1072 for a DImode operation on a CONST_INT. */
1073 else if (GET_MODE (op) == VOIDmode
1074 && width <= HOST_BITS_PER_WIDE_INT * 2
1075 && (GET_CODE (op) == CONST_DOUBLE
1076 || GET_CODE (op) == CONST_INT))
1078 unsigned HOST_WIDE_INT l1, lv;
1079 HOST_WIDE_INT h1, hv;
1081 if (GET_CODE (op) == CONST_DOUBLE)
1082 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1083 else
1084 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1086 switch (code)
1088 case NOT:
1089 lv = ~ l1;
1090 hv = ~ h1;
1091 break;
1093 case NEG:
1094 neg_double (l1, h1, &lv, &hv);
1095 break;
1097 case ABS:
1098 if (h1 < 0)
1099 neg_double (l1, h1, &lv, &hv);
1100 else
1101 lv = l1, hv = h1;
1102 break;
1104 case FFS:
1105 hv = 0;
1106 if (l1 == 0)
1108 if (h1 == 0)
1109 lv = 0;
1110 else
1111 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1113 else
1114 lv = exact_log2 (l1 & -l1) + 1;
1115 break;
1117 case CLZ:
1118 hv = 0;
1119 if (h1 != 0)
1120 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1121 - HOST_BITS_PER_WIDE_INT;
1122 else if (l1 != 0)
1123 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1124 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1125 lv = GET_MODE_BITSIZE (mode);
1126 break;
1128 case CTZ:
1129 hv = 0;
1130 if (l1 != 0)
1131 lv = exact_log2 (l1 & -l1);
1132 else if (h1 != 0)
1133 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1134 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1135 lv = GET_MODE_BITSIZE (mode);
1136 break;
1138 case POPCOUNT:
1139 hv = 0;
1140 lv = 0;
1141 while (l1)
1142 lv++, l1 &= l1 - 1;
1143 while (h1)
1144 lv++, h1 &= h1 - 1;
1145 break;
1147 case PARITY:
1148 hv = 0;
1149 lv = 0;
1150 while (l1)
1151 lv++, l1 &= l1 - 1;
1152 while (h1)
1153 lv++, h1 &= h1 - 1;
1154 lv &= 1;
1155 break;
1157 case TRUNCATE:
1158 /* This is just a change-of-mode, so do nothing. */
1159 lv = l1, hv = h1;
1160 break;
1162 case ZERO_EXTEND:
1163 gcc_assert (op_mode != VOIDmode);
1165 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1166 return 0;
1168 hv = 0;
1169 lv = l1 & GET_MODE_MASK (op_mode);
1170 break;
1172 case SIGN_EXTEND:
1173 if (op_mode == VOIDmode
1174 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1175 return 0;
1176 else
1178 lv = l1 & GET_MODE_MASK (op_mode);
1179 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1180 && (lv & ((HOST_WIDE_INT) 1
1181 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1182 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1184 hv = HWI_SIGN_EXTEND (lv);
1186 break;
1188 case SQRT:
1189 return 0;
1191 default:
1192 return 0;
1195 return immed_double_const (lv, hv, mode);
1198 else if (GET_CODE (op) == CONST_DOUBLE
1199 && SCALAR_FLOAT_MODE_P (mode))
1201 REAL_VALUE_TYPE d, t;
1202 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1204 switch (code)
1206 case SQRT:
1207 if (HONOR_SNANS (mode) && real_isnan (&d))
1208 return 0;
1209 real_sqrt (&t, mode, &d);
1210 d = t;
1211 break;
1212 case ABS:
1213 d = REAL_VALUE_ABS (d);
1214 break;
1215 case NEG:
1216 d = REAL_VALUE_NEGATE (d);
1217 break;
1218 case FLOAT_TRUNCATE:
1219 d = real_value_truncate (mode, d);
1220 break;
1221 case FLOAT_EXTEND:
1222 /* All this does is change the mode. */
1223 break;
1224 case FIX:
1225 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1226 break;
1227 case NOT:
1229 long tmp[4];
1230 int i;
1232 real_to_target (tmp, &d, GET_MODE (op));
1233 for (i = 0; i < 4; i++)
1234 tmp[i] = ~tmp[i];
1235 real_from_target (&d, tmp, mode);
1236 break;
1238 default:
1239 gcc_unreachable ();
1241 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1244 else if (GET_CODE (op) == CONST_DOUBLE
1245 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1246 && GET_MODE_CLASS (mode) == MODE_INT
1247 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1249 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1250 operators are intentionally left unspecified (to ease implementation
1251 by target backends), for consistency, this routine implements the
1252 same semantics for constant folding as used by the middle-end. */
1254 /* This was formerly used only for non-IEEE float.
1255 eggert@twinsun.com says it is safe for IEEE also. */
1256 HOST_WIDE_INT xh, xl, th, tl;
1257 REAL_VALUE_TYPE x, t;
1258 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1259 switch (code)
1261 case FIX:
1262 if (REAL_VALUE_ISNAN (x))
1263 return const0_rtx;
1265 /* Test against the signed upper bound. */
1266 if (width > HOST_BITS_PER_WIDE_INT)
1268 th = ((unsigned HOST_WIDE_INT) 1
1269 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1270 tl = -1;
1272 else
1274 th = 0;
1275 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1277 real_from_integer (&t, VOIDmode, tl, th, 0);
1278 if (REAL_VALUES_LESS (t, x))
1280 xh = th;
1281 xl = tl;
1282 break;
1285 /* Test against the signed lower bound. */
1286 if (width > HOST_BITS_PER_WIDE_INT)
1288 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1289 tl = 0;
1291 else
1293 th = -1;
1294 tl = (HOST_WIDE_INT) -1 << (width - 1);
1296 real_from_integer (&t, VOIDmode, tl, th, 0);
1297 if (REAL_VALUES_LESS (x, t))
1299 xh = th;
1300 xl = tl;
1301 break;
1303 REAL_VALUE_TO_INT (&xl, &xh, x);
1304 break;
1306 case UNSIGNED_FIX:
1307 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1308 return const0_rtx;
1310 /* Test against the unsigned upper bound. */
1311 if (width == 2*HOST_BITS_PER_WIDE_INT)
1313 th = -1;
1314 tl = -1;
1316 else if (width >= HOST_BITS_PER_WIDE_INT)
1318 th = ((unsigned HOST_WIDE_INT) 1
1319 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1320 tl = -1;
1322 else
1324 th = 0;
1325 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1327 real_from_integer (&t, VOIDmode, tl, th, 1);
1328 if (REAL_VALUES_LESS (t, x))
1330 xh = th;
1331 xl = tl;
1332 break;
1335 REAL_VALUE_TO_INT (&xl, &xh, x);
1336 break;
1338 default:
1339 gcc_unreachable ();
1341 return immed_double_const (xl, xh, mode);
1344 return NULL_RTX;
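/* A worked example of the FIX clamping above, assuming 32-bit SImode:
   folding (fix:SI (const_double:DF 3.0e10)) saturates to the signed upper
   bound and yields (const_int 2147483647), while a NaN operand folds to
   (const_int 0).  */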
1347 /* Subroutine of simplify_binary_operation to simplify a commutative,
1348 associative binary operation CODE with result mode MODE, operating
1349 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1350 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1351 canonicalization is possible. */
1353 static rtx
1354 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1355 rtx op0, rtx op1)
1357 rtx tem;
1359 /* Linearize the operator to the left. */
1360 if (GET_CODE (op1) == code)
1362 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1363 if (GET_CODE (op0) == code)
1365 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1366 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1369 /* "a op (b op c)" becomes "(b op c) op a". */
1370 if (! swap_commutative_operands_p (op1, op0))
1371 return simplify_gen_binary (code, mode, op1, op0);
1373 tem = op0;
1374 op0 = op1;
1375 op1 = tem;
1378 if (GET_CODE (op0) == code)
1380 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1381 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1383 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1384 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1387 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1388 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1389 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1390 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1391 if (tem != 0)
1392 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1394 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1395 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1396 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1397 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1398 if (tem != 0)
1399 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1402 return 0;
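/* For example, with X a non-constant SImode operand:
   (plus (plus X (const_int 1)) (const_int 2)) is reassociated as
   "a op (b op c)", the two constants fold, and the result is
   (plus X (const_int 3)).  */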
1406 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1407 and OP1. Return 0 if no simplification is possible.
1409 Don't use this for relational operations such as EQ or LT.
1410 Use simplify_relational_operation instead. */
1411 rtx
1412 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1413 rtx op0, rtx op1)
1415 rtx trueop0, trueop1;
1416 rtx tem;
1418 /* Relational operations don't work here. We must know the mode
1419 of the operands in order to do the comparison correctly.
1420 Assuming a full word can give incorrect results.
1421 Consider comparing 128 with -128 in QImode. */
1422 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1423 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1425 /* Make sure the constant is second. */
1426 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1427 && swap_commutative_operands_p (op0, op1))
1429 tem = op0, op0 = op1, op1 = tem;
1432 trueop0 = avoid_constant_pool_reference (op0);
1433 trueop1 = avoid_constant_pool_reference (op1);
1435 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1436 if (tem)
1437 return tem;
1438 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1441 static rtx
1442 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1443 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1445 rtx tem, reversed, opleft, opright;
1446 HOST_WIDE_INT val;
1447 unsigned int width = GET_MODE_BITSIZE (mode);
1449 /* Even if we can't compute a constant result,
1450 there are some cases worth simplifying. */
1452 switch (code)
1454 case PLUS:
1455 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1456 when x is NaN, infinite, or finite and nonzero. They aren't
1457 when x is -0 and the rounding mode is not towards -infinity,
1458 since (-0) + 0 is then 0. */
1459 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1460 return op0;
1462 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1463 transformations are safe even for IEEE. */
1464 if (GET_CODE (op0) == NEG)
1465 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1466 else if (GET_CODE (op1) == NEG)
1467 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1469 /* (~a) + 1 -> -a */
1470 if (INTEGRAL_MODE_P (mode)
1471 && GET_CODE (op0) == NOT
1472 && trueop1 == const1_rtx)
1473 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1475 /* Handle both-operands-constant cases. We can only add
1476 CONST_INTs to constants since the sum of relocatable symbols
1477 can't be handled by most assemblers. Don't add CONST_INT
1478 to CONST_INT since overflow won't be computed properly if wider
1479 than HOST_BITS_PER_WIDE_INT. */
1481 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1482 && GET_CODE (op1) == CONST_INT)
1483 return plus_constant (op0, INTVAL (op1));
1484 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1485 && GET_CODE (op0) == CONST_INT)
1486 return plus_constant (op1, INTVAL (op0));
1488 /* See if this is something like X * C - X or vice versa or
1489 if the multiplication is written as a shift. If so, we can
1490 distribute and make a new multiply, shift, or maybe just
1491 have X (if C is 2 in the example above). But don't make
1492 something more expensive than we had before. */
1494 if (SCALAR_INT_MODE_P (mode))
1496 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1497 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1498 rtx lhs = op0, rhs = op1;
1500 if (GET_CODE (lhs) == NEG)
1502 coeff0l = -1;
1503 coeff0h = -1;
1504 lhs = XEXP (lhs, 0);
1506 else if (GET_CODE (lhs) == MULT
1507 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1509 coeff0l = INTVAL (XEXP (lhs, 1));
1510 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1511 lhs = XEXP (lhs, 0);
1513 else if (GET_CODE (lhs) == ASHIFT
1514 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1515 && INTVAL (XEXP (lhs, 1)) >= 0
1516 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1518 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1519 coeff0h = 0;
1520 lhs = XEXP (lhs, 0);
1523 if (GET_CODE (rhs) == NEG)
1525 coeff1l = -1;
1526 coeff1h = -1;
1527 rhs = XEXP (rhs, 0);
1529 else if (GET_CODE (rhs) == MULT
1530 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1532 coeff1l = INTVAL (XEXP (rhs, 1));
1533 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1534 rhs = XEXP (rhs, 0);
1536 else if (GET_CODE (rhs) == ASHIFT
1537 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1538 && INTVAL (XEXP (rhs, 1)) >= 0
1539 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1541 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1542 coeff1h = 0;
1543 rhs = XEXP (rhs, 0);
1546 if (rtx_equal_p (lhs, rhs))
1548 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1549 rtx coeff;
1550 unsigned HOST_WIDE_INT l;
1551 HOST_WIDE_INT h;
1553 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1554 coeff = immed_double_const (l, h, mode);
1556 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1557 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1558 ? tem : 0;
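/* For example, (plus (mult X (const_int 3)) X) has coefficients 3 and 1,
   which add_double combines to 4, giving (mult X (const_int 4)) provided
   rtx_cost does not consider that form more expensive than the PLUS.  */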
1562 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1563 if ((GET_CODE (op1) == CONST_INT
1564 || GET_CODE (op1) == CONST_DOUBLE)
1565 && GET_CODE (op0) == XOR
1566 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1567 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1568 && mode_signbit_p (mode, op1))
1569 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1570 simplify_gen_binary (XOR, mode, op1,
1571 XEXP (op0, 1)));
1573 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1574 if (GET_CODE (op0) == MULT
1575 && GET_CODE (XEXP (op0, 0)) == NEG)
1577 rtx in1, in2;
1579 in1 = XEXP (XEXP (op0, 0), 0);
1580 in2 = XEXP (op0, 1);
1581 return simplify_gen_binary (MINUS, mode, op1,
1582 simplify_gen_binary (MULT, mode,
1583 in1, in2));
1586 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1587 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1588 is 1. */
1589 if (COMPARISON_P (op0)
1590 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1591 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1592 && (reversed = reversed_comparison (op0, mode)))
1593 return
1594 simplify_gen_unary (NEG, mode, reversed, mode);
1596 /* If one of the operands is a PLUS or a MINUS, see if we can
1597 simplify this by the associative law.
1598 Don't use the associative law for floating point.
1599 The inaccuracy makes it nonassociative,
1600 and subtle programs can break if operations are associated. */
1602 if (INTEGRAL_MODE_P (mode)
1603 && (plus_minus_operand_p (op0)
1604 || plus_minus_operand_p (op1))
1605 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1606 return tem;
1608 /* Reassociate floating point addition only when the user
1609 specifies unsafe math optimizations. */
1610 if (FLOAT_MODE_P (mode)
1611 && flag_unsafe_math_optimizations)
1613 tem = simplify_associative_operation (code, mode, op0, op1);
1614 if (tem)
1615 return tem;
1617 break;
1619 case COMPARE:
1620 #ifdef HAVE_cc0
1621 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1622 using cc0, in which case we want to leave it as a COMPARE
1623 so we can distinguish it from a register-register-copy.
1625 In IEEE floating point, x-0 is not the same as x. */
1627 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1628 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1629 && trueop1 == CONST0_RTX (mode))
1630 return op0;
1631 #endif
1633 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1634 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1635 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1636 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1638 rtx xop00 = XEXP (op0, 0);
1639 rtx xop10 = XEXP (op1, 0);
1641 #ifdef HAVE_cc0
1642 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1643 #else
1644 if (REG_P (xop00) && REG_P (xop10)
1645 && GET_MODE (xop00) == GET_MODE (xop10)
1646 && REGNO (xop00) == REGNO (xop10)
1647 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1648 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1649 #endif
1650 return xop00;
1652 break;
1654 case MINUS:
1655 /* We can't assume x-x is 0 even with non-IEEE floating point,
1656 but since it is zero except in very strange circumstances, we
1657 will treat it as zero with -funsafe-math-optimizations. */
1658 if (rtx_equal_p (trueop0, trueop1)
1659 && ! side_effects_p (op0)
1660 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1661 return CONST0_RTX (mode);
1663 /* Change subtraction from zero into negation. (0 - x) is the
1664 same as -x when x is NaN, infinite, or finite and nonzero.
1665 But if the mode has signed zeros, and does not round towards
1666 -infinity, then 0 - 0 is 0, not -0. */
1667 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1668 return simplify_gen_unary (NEG, mode, op1, mode);
1670 /* (-1 - a) is ~a. */
1671 if (trueop0 == constm1_rtx)
1672 return simplify_gen_unary (NOT, mode, op1, mode);
1674 /* Subtracting 0 has no effect unless the mode has signed zeros
1675 and supports rounding towards -infinity. In such a case,
1676 0 - 0 is -0. */
1677 if (!(HONOR_SIGNED_ZEROS (mode)
1678 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1679 && trueop1 == CONST0_RTX (mode))
1680 return op0;
1682 /* See if this is something like X * C - X or vice versa or
1683 if the multiplication is written as a shift. If so, we can
1684 distribute and make a new multiply, shift, or maybe just
1685 have X (if C is 2 in the example above). But don't make
1686 something more expensive than we had before. */
1688 if (SCALAR_INT_MODE_P (mode))
1690 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1691 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1692 rtx lhs = op0, rhs = op1;
1694 if (GET_CODE (lhs) == NEG)
1696 coeff0l = -1;
1697 coeff0h = -1;
1698 lhs = XEXP (lhs, 0);
1700 else if (GET_CODE (lhs) == MULT
1701 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1703 coeff0l = INTVAL (XEXP (lhs, 1));
1704 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1705 lhs = XEXP (lhs, 0);
1707 else if (GET_CODE (lhs) == ASHIFT
1708 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1709 && INTVAL (XEXP (lhs, 1)) >= 0
1710 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1712 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1713 coeff0h = 0;
1714 lhs = XEXP (lhs, 0);
1717 if (GET_CODE (rhs) == NEG)
1719 negcoeff1l = 1;
1720 negcoeff1h = 0;
1721 rhs = XEXP (rhs, 0);
1723 else if (GET_CODE (rhs) == MULT
1724 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1726 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1727 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1728 rhs = XEXP (rhs, 0);
1730 else if (GET_CODE (rhs) == ASHIFT
1731 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1732 && INTVAL (XEXP (rhs, 1)) >= 0
1733 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1735 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1736 negcoeff1h = -1;
1737 rhs = XEXP (rhs, 0);
1740 if (rtx_equal_p (lhs, rhs))
1742 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1743 rtx coeff;
1744 unsigned HOST_WIDE_INT l;
1745 HOST_WIDE_INT h;
1747 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1748 coeff = immed_double_const (l, h, mode);
1750 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1751 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1752 ? tem : 0;
1756 /* (a - (-b)) -> (a + b). True even for IEEE. */
1757 if (GET_CODE (op1) == NEG)
1758 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1760 /* (-x - c) may be simplified as (-c - x). */
1761 if (GET_CODE (op0) == NEG
1762 && (GET_CODE (op1) == CONST_INT
1763 || GET_CODE (op1) == CONST_DOUBLE))
1765 tem = simplify_unary_operation (NEG, mode, op1, mode);
1766 if (tem)
1767 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1770 /* Don't let a relocatable value get a negative coeff. */
1771 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1772 return simplify_gen_binary (PLUS, mode,
1773 op0,
1774 neg_const_int (mode, op1));
1776 /* (x - (x & y)) -> (x & ~y) */
1777 if (GET_CODE (op1) == AND)
1779 if (rtx_equal_p (op0, XEXP (op1, 0)))
1781 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1782 GET_MODE (XEXP (op1, 1)));
1783 return simplify_gen_binary (AND, mode, op0, tem);
1785 if (rtx_equal_p (op0, XEXP (op1, 1)))
1787 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1788 GET_MODE (XEXP (op1, 0)));
1789 return simplify_gen_binary (AND, mode, op0, tem);
1793 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1794 by reversing the comparison code if valid. */
1795 if (STORE_FLAG_VALUE == 1
1796 && trueop0 == const1_rtx
1797 && COMPARISON_P (op1)
1798 && (reversed = reversed_comparison (op1, mode)))
1799 return reversed;
1801 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1802 if (GET_CODE (op1) == MULT
1803 && GET_CODE (XEXP (op1, 0)) == NEG)
1805 rtx in1, in2;
1807 in1 = XEXP (XEXP (op1, 0), 0);
1808 in2 = XEXP (op1, 1);
1809 return simplify_gen_binary (PLUS, mode,
1810 simplify_gen_binary (MULT, mode,
1811 in1, in2),
1812 op0);
1815 /* Canonicalize (minus (neg A) (mult B C)) to
1816 (minus (mult (neg B) C) A). */
1817 if (GET_CODE (op1) == MULT
1818 && GET_CODE (op0) == NEG)
1820 rtx in1, in2;
1822 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1823 in2 = XEXP (op1, 1);
1824 return simplify_gen_binary (MINUS, mode,
1825 simplify_gen_binary (MULT, mode,
1826 in1, in2),
1827 XEXP (op0, 0));
1830 /* If one of the operands is a PLUS or a MINUS, see if we can
1831 simplify this by the associative law. This will, for example,
1832 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1833 Don't use the associative law for floating point.
1834 The inaccuracy makes it nonassociative,
1835 and subtle programs can break if operations are associated. */
1837 if (INTEGRAL_MODE_P (mode)
1838 && (plus_minus_operand_p (op0)
1839 || plus_minus_operand_p (op1))
1840 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1841 return tem;
1842 break;
1844 case MULT:
1845 if (trueop1 == constm1_rtx)
1846 return simplify_gen_unary (NEG, mode, op0, mode);
1848 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1849 x is NaN, since x * 0 is then also NaN. Nor is it valid
1850 when the mode has signed zeros, since multiplying a negative
1851 number by 0 will give -0, not 0. */
1852 if (!HONOR_NANS (mode)
1853 && !HONOR_SIGNED_ZEROS (mode)
1854 && trueop1 == CONST0_RTX (mode)
1855 && ! side_effects_p (op0))
1856 return op1;
1858 /* In IEEE floating point, x*1 is not equivalent to x for
1859 signalling NaNs. */
1860 if (!HONOR_SNANS (mode)
1861 && trueop1 == CONST1_RTX (mode))
1862 return op0;
1864 /* Convert multiply by constant power of two into shift unless
1865 we are still generating RTL. This test is a kludge. */
1866 if (GET_CODE (trueop1) == CONST_INT
1867 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1868 /* If the mode is larger than the host word size, and the
1869 uppermost bit is set, then this isn't a power of two due
1870 to implicit sign extension. */
1871 && (width <= HOST_BITS_PER_WIDE_INT
1872 || val != HOST_BITS_PER_WIDE_INT - 1))
1873 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1875 /* Likewise for multipliers wider than a word. */
1876 else if (GET_CODE (trueop1) == CONST_DOUBLE
1877 && (GET_MODE (trueop1) == VOIDmode
1878 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1879 && GET_MODE (op0) == mode
1880 && CONST_DOUBLE_LOW (trueop1) == 0
1881 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1882 return simplify_gen_binary (ASHIFT, mode, op0,
1883 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1885 /* x*2 is x+x and x*(-1) is -x */
1886 if (GET_CODE (trueop1) == CONST_DOUBLE
1887 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1888 && GET_MODE (op0) == mode)
1890 REAL_VALUE_TYPE d;
1891 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1893 if (REAL_VALUES_EQUAL (d, dconst2))
1894 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1896 if (REAL_VALUES_EQUAL (d, dconstm1))
1897 return simplify_gen_unary (NEG, mode, op0, mode);
1900 /* Reassociate multiplication, but for floating point MULTs
1901 only when the user specifies unsafe math optimizations. */
1902 if (! FLOAT_MODE_P (mode)
1903 || flag_unsafe_math_optimizations)
1905 tem = simplify_associative_operation (code, mode, op0, op1);
1906 if (tem)
1907 return tem;
1909 break;
1911 case IOR:
1912 if (trueop1 == const0_rtx)
1913 return op0;
1914 if (GET_CODE (trueop1) == CONST_INT
1915 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1916 == GET_MODE_MASK (mode)))
1917 return op1;
1918 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1919 return op0;
1920 /* A | (~A) -> -1 */
1921 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1922 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1923 && ! side_effects_p (op0)
1924 && SCALAR_INT_MODE_P (mode))
1925 return constm1_rtx;
1927 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1928 if (GET_CODE (op1) == CONST_INT
1929 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1930 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1931 return op1;
1933 /* Convert (A & B) | A to A. */
1934 if (GET_CODE (op0) == AND
1935 && (rtx_equal_p (XEXP (op0, 0), op1)
1936 || rtx_equal_p (XEXP (op0, 1), op1))
1937 && ! side_effects_p (XEXP (op0, 0))
1938 && ! side_effects_p (XEXP (op0, 1)))
1939 return op1;
1941 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1942 mode size to (rotate A CX). */
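/* For example, in SImode (ior (ashift A (const_int 24))
(lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)).  */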
1944 if (GET_CODE (op1) == ASHIFT
1945 || GET_CODE (op1) == SUBREG)
1947 opleft = op1;
1948 opright = op0;
1950 else
1952 opright = op1;
1953 opleft = op0;
1956 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1957 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1958 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1959 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1960 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1961 == GET_MODE_BITSIZE (mode)))
1962 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
1964 /* Same, but for ashift that has been "simplified" to a wider mode
1965 by simplify_shift_const. */
1967 if (GET_CODE (opleft) == SUBREG
1968 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
1969 && GET_CODE (opright) == LSHIFTRT
1970 && GET_CODE (XEXP (opright, 0)) == SUBREG
1971 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
1972 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
1973 && (GET_MODE_SIZE (GET_MODE (opleft))
1974 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
1975 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
1976 SUBREG_REG (XEXP (opright, 0)))
1977 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
1978 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1979 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
1980 == GET_MODE_BITSIZE (mode)))
1981 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
1982 XEXP (SUBREG_REG (opleft), 1));
1984 /* If we have (ior (and X C1) C2), simplify this by making
1985 C1 as small as possible if C1 actually changes. */
1986 if (GET_CODE (op1) == CONST_INT
1987 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1988 || INTVAL (op1) > 0)
1989 && GET_CODE (op0) == AND
1990 && GET_CODE (XEXP (op0, 1)) == CONST_INT
1991 && GET_CODE (op1) == CONST_INT
1992 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
1993 return simplify_gen_binary (IOR, mode,
1994 simplify_gen_binary
1995 (AND, mode, XEXP (op0, 0),
1996 GEN_INT (INTVAL (XEXP (op0, 1))
1997 & ~INTVAL (op1))),
1998 op1);
2000 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2001 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2002 the PLUS does not affect any of the bits in OP1: then we can do
2003 the IOR as a PLUS and we can associate. This is valid if OP1
2004 can be safely shifted left C bits. */
2005 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2006 && GET_CODE (XEXP (op0, 0)) == PLUS
2007 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2008 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2009 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2011 int count = INTVAL (XEXP (op0, 1));
2012 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2014 if (mask >> count == INTVAL (trueop1)
2015 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2016 return simplify_gen_binary (ASHIFTRT, mode,
2017 plus_constant (XEXP (op0, 0), mask),
2018 XEXP (op0, 1));
2021 tem = simplify_associative_operation (code, mode, op0, op1);
2022 if (tem)
2023 return tem;
2024 break;
2026 case XOR:
2027 if (trueop1 == const0_rtx)
2028 return op0;
2029 if (GET_CODE (trueop1) == CONST_INT
2030 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2031 == GET_MODE_MASK (mode)))
2032 return simplify_gen_unary (NOT, mode, op0, mode);
2033 if (rtx_equal_p (trueop0, trueop1)
2034 && ! side_effects_p (op0)
2035 && GET_MODE_CLASS (mode) != MODE_CC)
2036 return CONST0_RTX (mode);
2038 /* Canonicalize XOR of the most significant bit to PLUS. */
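/* XORing with the sign bit and adding the sign bit compute the same
value, since any carry out of the sign bit is discarded; PLUS is
the canonical form.  */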
2039 if ((GET_CODE (op1) == CONST_INT
2040 || GET_CODE (op1) == CONST_DOUBLE)
2041 && mode_signbit_p (mode, op1))
2042 return simplify_gen_binary (PLUS, mode, op0, op1);
2043 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2044 if ((GET_CODE (op1) == CONST_INT
2045 || GET_CODE (op1) == CONST_DOUBLE)
2046 && GET_CODE (op0) == PLUS
2047 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2048 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2049 && mode_signbit_p (mode, XEXP (op0, 1)))
2050 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2051 simplify_gen_binary (XOR, mode, op1,
2052 XEXP (op0, 1)));
2054 /* If we are XORing two things that have no bits in common,
2055 convert them into an IOR. This helps to detect rotation encoded
2056 using those methods and possibly other simplifications. */
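/* With no overlapping nonzero bits there can be no carries, so XOR,
IOR and PLUS all compute the same value; using IOR lets the rotate
patterns above be recognized.  */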
2058 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2059 && (nonzero_bits (op0, mode)
2060 & nonzero_bits (op1, mode)) == 0)
2061 return (simplify_gen_binary (IOR, mode, op0, op1));
2063 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2064 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2065 (NOT y). */
2067 int num_negated = 0;
2069 if (GET_CODE (op0) == NOT)
2070 num_negated++, op0 = XEXP (op0, 0);
2071 if (GET_CODE (op1) == NOT)
2072 num_negated++, op1 = XEXP (op1, 0);
2074 if (num_negated == 2)
2075 return simplify_gen_binary (XOR, mode, op0, op1);
2076 else if (num_negated == 1)
2077 return simplify_gen_unary (NOT, mode,
2078 simplify_gen_binary (XOR, mode, op0, op1),
2079 mode);
2082 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2083 correspond to a machine insn or result in further simplifications
2084 if B is a constant. */
2086 if (GET_CODE (op0) == AND
2087 && rtx_equal_p (XEXP (op0, 1), op1)
2088 && ! side_effects_p (op1))
2089 return simplify_gen_binary (AND, mode,
2090 simplify_gen_unary (NOT, mode,
2091 XEXP (op0, 0), mode),
2092 op1);
2094 else if (GET_CODE (op0) == AND
2095 && rtx_equal_p (XEXP (op0, 0), op1)
2096 && ! side_effects_p (op1))
2097 return simplify_gen_binary (AND, mode,
2098 simplify_gen_unary (NOT, mode,
2099 XEXP (op0, 1), mode),
2100 op1);
2102 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2103 comparison if STORE_FLAG_VALUE is 1. */
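/* For example, (xor (eq X Y) (const_int 1)) becomes (ne X Y).  */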
2104 if (STORE_FLAG_VALUE == 1
2105 && trueop1 == const1_rtx
2106 && COMPARISON_P (op0)
2107 && (reversed = reversed_comparison (op0, mode)))
2108 return reversed;
2110 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2111 is (lt foo (const_int 0)), so we can perform the above
2112 simplification if STORE_FLAG_VALUE is 1. */
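/* For example, in SImode (xor (lshiftrt X (const_int 31)) (const_int 1))
becomes (ge X (const_int 0)).  */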
2114 if (STORE_FLAG_VALUE == 1
2115 && trueop1 == const1_rtx
2116 && GET_CODE (op0) == LSHIFTRT
2117 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2118 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2119 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2121 /* (xor (comparison foo bar) (const_int sign-bit))
2122 when STORE_FLAG_VALUE is the sign bit. */
2123 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2124 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2125 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2126 && trueop1 == const_true_rtx
2127 && COMPARISON_P (op0)
2128 && (reversed = reversed_comparison (op0, mode)))
2129 return reversed;
2131 break;
2133 tem = simplify_associative_operation (code, mode, op0, op1);
2134 if (tem)
2135 return tem;
2136 break;
2138 case AND:
2139 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2140 return trueop1;
2141 /* If we are turning off bits already known off in OP0, we need
2142 not do an AND. */
2143 if (GET_CODE (trueop1) == CONST_INT
2144 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2145 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2146 return op0;
2147 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2148 && GET_MODE_CLASS (mode) != MODE_CC)
2149 return op0;
2150 /* A & (~A) -> 0 */
2151 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2152 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2153 && ! side_effects_p (op0)
2154 && GET_MODE_CLASS (mode) != MODE_CC)
2155 return CONST0_RTX (mode);
2157 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2158 there are no nonzero bits of C outside of X's mode. */
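/* For example, (and:SI (sign_extend:SI X) (const_int 0x7f)) with X
in QImode becomes (zero_extend:SI (and:QI X (const_int 0x7f))).  */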
2159 if ((GET_CODE (op0) == SIGN_EXTEND
2160 || GET_CODE (op0) == ZERO_EXTEND)
2161 && GET_CODE (trueop1) == CONST_INT
2162 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2163 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2164 & INTVAL (trueop1)) == 0)
2166 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2167 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2168 gen_int_mode (INTVAL (trueop1),
2169 imode));
2170 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2173 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2174 insn (and may simplify more). */
2175 if (GET_CODE (op0) == XOR
2176 && rtx_equal_p (XEXP (op0, 0), op1)
2177 && ! side_effects_p (op1))
2178 return simplify_gen_binary (AND, mode,
2179 simplify_gen_unary (NOT, mode,
2180 XEXP (op0, 1), mode),
2181 op1);
2183 if (GET_CODE (op0) == XOR
2184 && rtx_equal_p (XEXP (op0, 1), op1)
2185 && ! side_effects_p (op1))
2186 return simplify_gen_binary (AND, mode,
2187 simplify_gen_unary (NOT, mode,
2188 XEXP (op0, 0), mode),
2189 op1);
2191 /* Similarly for (~(A ^ B)) & A. */
2192 if (GET_CODE (op0) == NOT
2193 && GET_CODE (XEXP (op0, 0)) == XOR
2194 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2195 && ! side_effects_p (op1))
2196 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2198 if (GET_CODE (op0) == NOT
2199 && GET_CODE (XEXP (op0, 0)) == XOR
2200 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2201 && ! side_effects_p (op1))
2202 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2204 /* Convert (A | B) & A to A. */
2205 if (GET_CODE (op0) == IOR
2206 && (rtx_equal_p (XEXP (op0, 0), op1)
2207 || rtx_equal_p (XEXP (op0, 1), op1))
2208 && ! side_effects_p (XEXP (op0, 0))
2209 && ! side_effects_p (XEXP (op0, 1)))
2210 return op1;
2212 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2213 ((A & N) + B) & M -> (A + B) & M
2214 Similarly if (N & M) == 0,
2215 ((A | N) + B) & M -> (A + B) & M
2216 and for - instead of + and/or ^ instead of |. */
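/* For example, with M == 0xff and N == 0xffff,
(and (plus (and A (const_int 0xffff)) B) (const_int 0xff))
becomes (and (plus A B) (const_int 0xff)).  */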
2217 if (GET_CODE (trueop1) == CONST_INT
2218 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2219 && ~INTVAL (trueop1)
2220 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2221 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2223 rtx pmop[2];
2224 int which;
2226 pmop[0] = XEXP (op0, 0);
2227 pmop[1] = XEXP (op0, 1);
2229 for (which = 0; which < 2; which++)
2231 tem = pmop[which];
2232 switch (GET_CODE (tem))
2234 case AND:
2235 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2236 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2237 == INTVAL (trueop1))
2238 pmop[which] = XEXP (tem, 0);
2239 break;
2240 case IOR:
2241 case XOR:
2242 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2243 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2244 pmop[which] = XEXP (tem, 0);
2245 break;
2246 default:
2247 break;
2251 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2253 tem = simplify_gen_binary (GET_CODE (op0), mode,
2254 pmop[0], pmop[1]);
2255 return simplify_gen_binary (code, mode, tem, op1);
2258 tem = simplify_associative_operation (code, mode, op0, op1);
2259 if (tem)
2260 return tem;
2261 break;
2263 case UDIV:
2264 /* 0/x is 0 (or x&0 if x has side-effects). */
2265 if (trueop0 == CONST0_RTX (mode))
2267 if (side_effects_p (op1))
2268 return simplify_gen_binary (AND, mode, op1, trueop0);
2269 return trueop0;
2271 /* x/1 is x. */
2272 if (trueop1 == CONST1_RTX (mode))
2273 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2274 /* Convert divide by power of two into shift. */
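/* For example, (udiv X (const_int 16)) becomes
(lshiftrt X (const_int 4)).  */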
2275 if (GET_CODE (trueop1) == CONST_INT
2276 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2277 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2278 break;
2280 case DIV:
2281 /* Handle floating point and integers separately. */
2282 if (SCALAR_FLOAT_MODE_P (mode))
2284 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2285 safe for modes with NaNs, since 0.0 / 0.0 will then be
2286 NaN rather than 0.0. Nor is it safe for modes with signed
2287 zeros, since dividing 0 by a negative number gives -0.0 */
2288 if (trueop0 == CONST0_RTX (mode)
2289 && !HONOR_NANS (mode)
2290 && !HONOR_SIGNED_ZEROS (mode)
2291 && ! side_effects_p (op1))
2292 return op0;
2293 /* x/1.0 is x. */
2294 if (trueop1 == CONST1_RTX (mode)
2295 && !HONOR_SNANS (mode))
2296 return op0;
2298 if (GET_CODE (trueop1) == CONST_DOUBLE
2299 && trueop1 != CONST0_RTX (mode))
2301 REAL_VALUE_TYPE d;
2302 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2304 /* x/-1.0 is -x. */
2305 if (REAL_VALUES_EQUAL (d, dconstm1)
2306 && !HONOR_SNANS (mode))
2307 return simplify_gen_unary (NEG, mode, op0, mode);
2309 /* Change FP division by a constant into multiplication.
2310 Only do this with -funsafe-math-optimizations. */
2311 if (flag_unsafe_math_optimizations
2312 && !REAL_VALUES_EQUAL (d, dconst0))
2314 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2315 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2316 return simplify_gen_binary (MULT, mode, op0, tem);
2320 else
2322 /* 0/x is 0 (or x&0 if x has side-effects). */
2323 if (trueop0 == CONST0_RTX (mode))
2325 if (side_effects_p (op1))
2326 return simplify_gen_binary (AND, mode, op1, trueop0);
2327 return trueop0;
2329 /* x/1 is x. */
2330 if (trueop1 == CONST1_RTX (mode))
2331 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2332 /* x/-1 is -x. */
2333 if (trueop1 == constm1_rtx)
2335 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2336 return simplify_gen_unary (NEG, mode, x, mode);
2339 break;
2341 case UMOD:
2342 /* 0%x is 0 (or x&0 if x has side-effects). */
2343 if (trueop0 == CONST0_RTX (mode))
2345 if (side_effects_p (op1))
2346 return simplify_gen_binary (AND, mode, op1, trueop0);
2347 return trueop0;
2349 /* x%1 is 0 (or x&0 if x has side-effects). */
2350 if (trueop1 == CONST1_RTX (mode))
2352 if (side_effects_p (op0))
2353 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2354 return CONST0_RTX (mode);
2356 /* Implement modulus by power of two as AND. */
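/* For example, (umod X (const_int 8)) becomes
(and X (const_int 7)).  */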
2357 if (GET_CODE (trueop1) == CONST_INT
2358 && exact_log2 (INTVAL (trueop1)) > 0)
2359 return simplify_gen_binary (AND, mode, op0,
2360 GEN_INT (INTVAL (op1) - 1));
2361 break;
2363 case MOD:
2364 /* 0%x is 0 (or x&0 if x has side-effects). */
2365 if (trueop0 == CONST0_RTX (mode))
2367 if (side_effects_p (op1))
2368 return simplify_gen_binary (AND, mode, op1, trueop0);
2369 return trueop0;
2371 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2372 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2374 if (side_effects_p (op0))
2375 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2376 return CONST0_RTX (mode);
2378 break;
2380 case ROTATERT:
2381 case ROTATE:
2382 case ASHIFTRT:
2383 /* Rotating ~0 always results in ~0. */
2384 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2385 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2386 && ! side_effects_p (op1))
2387 return op0;
2389 /* Fall through.... */
2391 case ASHIFT:
2392 case LSHIFTRT:
2393 if (trueop1 == CONST0_RTX (mode))
2394 return op0;
2395 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2396 return op0;
2397 break;
2399 case SMIN:
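/* SMIN of X and the most negative value of the mode is that value;
SMIN of X with itself is X.  */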
2400 if (width <= HOST_BITS_PER_WIDE_INT
2401 && GET_CODE (trueop1) == CONST_INT
2402 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2403 && ! side_effects_p (op0))
2404 return op1;
2405 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2406 return op0;
2407 tem = simplify_associative_operation (code, mode, op0, op1);
2408 if (tem)
2409 return tem;
2410 break;
2412 case SMAX:
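/* SMAX of X and the largest signed value of the mode is that value;
SMAX of X with itself is X.  */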
2413 if (width <= HOST_BITS_PER_WIDE_INT
2414 && GET_CODE (trueop1) == CONST_INT
2415 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2416 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2417 && ! side_effects_p (op0))
2418 return op1;
2419 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2420 return op0;
2421 tem = simplify_associative_operation (code, mode, op0, op1);
2422 if (tem)
2423 return tem;
2424 break;
2426 case UMIN:
2427 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2428 return op1;
2429 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2430 return op0;
2431 tem = simplify_associative_operation (code, mode, op0, op1);
2432 if (tem)
2433 return tem;
2434 break;
2436 case UMAX:
2437 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2438 return op1;
2439 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2440 return op0;
2441 tem = simplify_associative_operation (code, mode, op0, op1);
2442 if (tem)
2443 return tem;
2444 break;
2446 case SS_PLUS:
2447 case US_PLUS:
2448 case SS_MINUS:
2449 case US_MINUS:
2450 /* ??? There are simplifications that can be done. */
2451 return 0;
2453 case VEC_SELECT:
2454 if (!VECTOR_MODE_P (mode))
2456 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2457 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2458 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2459 gcc_assert (XVECLEN (trueop1, 0) == 1);
2460 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2462 if (GET_CODE (trueop0) == CONST_VECTOR)
2463 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2464 (trueop1, 0, 0)));
2466 else
2468 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2469 gcc_assert (GET_MODE_INNER (mode)
2470 == GET_MODE_INNER (GET_MODE (trueop0)));
2471 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2473 if (GET_CODE (trueop0) == CONST_VECTOR)
2475 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2476 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2477 rtvec v = rtvec_alloc (n_elts);
2478 unsigned int i;
2480 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2481 for (i = 0; i < n_elts; i++)
2483 rtx x = XVECEXP (trueop1, 0, i);
2485 gcc_assert (GET_CODE (x) == CONST_INT);
2486 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2487 INTVAL (x));
2490 return gen_rtx_CONST_VECTOR (mode, v);
2494 if (XVECLEN (trueop1, 0) == 1
2495 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2496 && GET_CODE (trueop0) == VEC_CONCAT)
2498 rtx vec = trueop0;
2499 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2501 /* Try to find the element in the VEC_CONCAT. */
2502 while (GET_MODE (vec) != mode
2503 && GET_CODE (vec) == VEC_CONCAT)
2505 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2506 if (offset < vec_size)
2507 vec = XEXP (vec, 0);
2508 else
2510 offset -= vec_size;
2511 vec = XEXP (vec, 1);
2513 vec = avoid_constant_pool_reference (vec);
2516 if (GET_MODE (vec) == mode)
2517 return vec;
2520 return 0;
2521 case VEC_CONCAT:
2523 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2524 ? GET_MODE (trueop0)
2525 : GET_MODE_INNER (mode));
2526 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2527 ? GET_MODE (trueop1)
2528 : GET_MODE_INNER (mode));
2530 gcc_assert (VECTOR_MODE_P (mode));
2531 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2532 == GET_MODE_SIZE (mode));
2534 if (VECTOR_MODE_P (op0_mode))
2535 gcc_assert (GET_MODE_INNER (mode)
2536 == GET_MODE_INNER (op0_mode));
2537 else
2538 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2540 if (VECTOR_MODE_P (op1_mode))
2541 gcc_assert (GET_MODE_INNER (mode)
2542 == GET_MODE_INNER (op1_mode));
2543 else
2544 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2546 if ((GET_CODE (trueop0) == CONST_VECTOR
2547 || GET_CODE (trueop0) == CONST_INT
2548 || GET_CODE (trueop0) == CONST_DOUBLE)
2549 && (GET_CODE (trueop1) == CONST_VECTOR
2550 || GET_CODE (trueop1) == CONST_INT
2551 || GET_CODE (trueop1) == CONST_DOUBLE))
2553 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2554 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2555 rtvec v = rtvec_alloc (n_elts);
2556 unsigned int i;
2557 unsigned in_n_elts = 1;
2559 if (VECTOR_MODE_P (op0_mode))
2560 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2561 for (i = 0; i < n_elts; i++)
2563 if (i < in_n_elts)
2565 if (!VECTOR_MODE_P (op0_mode))
2566 RTVEC_ELT (v, i) = trueop0;
2567 else
2568 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2570 else
2572 if (!VECTOR_MODE_P (op1_mode))
2573 RTVEC_ELT (v, i) = trueop1;
2574 else
2575 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2576 i - in_n_elts);
2580 return gen_rtx_CONST_VECTOR (mode, v);
2583 return 0;
2585 default:
2586 gcc_unreachable ();
2589 return 0;
2593 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2594 rtx op0, rtx op1)
2596 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2597 HOST_WIDE_INT val;
2598 unsigned int width = GET_MODE_BITSIZE (mode);
2600 if (VECTOR_MODE_P (mode)
2601 && code != VEC_CONCAT
2602 && GET_CODE (op0) == CONST_VECTOR
2603 && GET_CODE (op1) == CONST_VECTOR)
2605 unsigned n_elts = GET_MODE_NUNITS (mode);
2606 enum machine_mode op0mode = GET_MODE (op0);
2607 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2608 enum machine_mode op1mode = GET_MODE (op1);
2609 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2610 rtvec v = rtvec_alloc (n_elts);
2611 unsigned int i;
2613 gcc_assert (op0_n_elts == n_elts);
2614 gcc_assert (op1_n_elts == n_elts);
2615 for (i = 0; i < n_elts; i++)
2617 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2618 CONST_VECTOR_ELT (op0, i),
2619 CONST_VECTOR_ELT (op1, i));
2620 if (!x)
2621 return 0;
2622 RTVEC_ELT (v, i) = x;
2625 return gen_rtx_CONST_VECTOR (mode, v);
2628 if (VECTOR_MODE_P (mode)
2629 && code == VEC_CONCAT
2630 && CONSTANT_P (op0) && CONSTANT_P (op1))
2632 unsigned n_elts = GET_MODE_NUNITS (mode);
2633 rtvec v = rtvec_alloc (n_elts);
2635 gcc_assert (n_elts >= 2);
2636 if (n_elts == 2)
2638 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2639 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2641 RTVEC_ELT (v, 0) = op0;
2642 RTVEC_ELT (v, 1) = op1;
2644 else
2646 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2647 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2648 unsigned i;
2650 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2651 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2652 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2654 for (i = 0; i < op0_n_elts; ++i)
2655 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2656 for (i = 0; i < op1_n_elts; ++i)
2657 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2660 return gen_rtx_CONST_VECTOR (mode, v);
2663 if (SCALAR_FLOAT_MODE_P (mode)
2664 && GET_CODE (op0) == CONST_DOUBLE
2665 && GET_CODE (op1) == CONST_DOUBLE
2666 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2668 if (code == AND
2669 || code == IOR
2670 || code == XOR)
2672 long tmp0[4];
2673 long tmp1[4];
2674 REAL_VALUE_TYPE r;
2675 int i;
2677 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2678 GET_MODE (op0));
2679 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2680 GET_MODE (op1));
2681 for (i = 0; i < 4; i++)
2683 switch (code)
2685 case AND:
2686 tmp0[i] &= tmp1[i];
2687 break;
2688 case IOR:
2689 tmp0[i] |= tmp1[i];
2690 break;
2691 case XOR:
2692 tmp0[i] ^= tmp1[i];
2693 break;
2694 default:
2695 gcc_unreachable ();
2698 real_from_target (&r, tmp0, mode);
2699 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2701 else
2703 REAL_VALUE_TYPE f0, f1, value, result;
2704 bool inexact;
2706 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2707 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2708 real_convert (&f0, mode, &f0);
2709 real_convert (&f1, mode, &f1);
2711 if (HONOR_SNANS (mode)
2712 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2713 return 0;
2715 if (code == DIV
2716 && REAL_VALUES_EQUAL (f1, dconst0)
2717 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2718 return 0;
2720 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2721 && flag_trapping_math
2722 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2724 int s0 = REAL_VALUE_NEGATIVE (f0);
2725 int s1 = REAL_VALUE_NEGATIVE (f1);
2727 switch (code)
2729 case PLUS:
2730 /* Inf + -Inf = NaN plus exception. */
2731 if (s0 != s1)
2732 return 0;
2733 break;
2734 case MINUS:
2735 /* Inf - Inf = NaN plus exception. */
2736 if (s0 == s1)
2737 return 0;
2738 break;
2739 case DIV:
2740 /* Inf / Inf = NaN plus exception. */
2741 return 0;
2742 default:
2743 break;
2747 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2748 && flag_trapping_math
2749 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2750 || (REAL_VALUE_ISINF (f1)
2751 && REAL_VALUES_EQUAL (f0, dconst0))))
2752 /* Inf * 0 = NaN plus exception. */
2753 return 0;
2755 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2756 &f0, &f1);
2757 real_convert (&result, mode, &value);
2759 /* Don't constant fold this floating point operation if
2760 the result has overflowed and flag_trapping_math is set. */
2762 if (flag_trapping_math
2763 && MODE_HAS_INFINITIES (mode)
2764 && REAL_VALUE_ISINF (result)
2765 && !REAL_VALUE_ISINF (f0)
2766 && !REAL_VALUE_ISINF (f1))
2767 /* Overflow plus exception. */
2768 return 0;
2770 /* Don't constant fold this floating point operation if the
2771 result may depend upon the run-time rounding mode and
2772 flag_rounding_math is set, or if GCC's software emulation
2773 is unable to accurately represent the result. */
2775 if ((flag_rounding_math
2776 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2777 && !flag_unsafe_math_optimizations))
2778 && (inexact || !real_identical (&result, &value)))
2779 return NULL_RTX;
2781 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2785 /* We can fold some multi-word operations. */
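/* Each constant is handled as a pair of host words: an unsigned low
word and a signed high word (L1/H1 for OP0, L2/H2 for OP1), matching
the add_double/mul_double helpers used below.  */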
2786 if (GET_MODE_CLASS (mode) == MODE_INT
2787 && width == HOST_BITS_PER_WIDE_INT * 2
2788 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2789 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2791 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2792 HOST_WIDE_INT h1, h2, hv, ht;
2794 if (GET_CODE (op0) == CONST_DOUBLE)
2795 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2796 else
2797 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2799 if (GET_CODE (op1) == CONST_DOUBLE)
2800 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2801 else
2802 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2804 switch (code)
2806 case MINUS:
2807 /* A - B == A + (-B). */
2808 neg_double (l2, h2, &lv, &hv);
2809 l2 = lv, h2 = hv;
2811 /* Fall through.... */
2813 case PLUS:
2814 add_double (l1, h1, l2, h2, &lv, &hv);
2815 break;
2817 case MULT:
2818 mul_double (l1, h1, l2, h2, &lv, &hv);
2819 break;
2821 case DIV:
2822 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2823 &lv, &hv, &lt, &ht))
2824 return 0;
2825 break;
2827 case MOD:
2828 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2829 &lt, &ht, &lv, &hv))
2830 return 0;
2831 break;
2833 case UDIV:
2834 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2835 &lv, &hv, &lt, &ht))
2836 return 0;
2837 break;
2839 case UMOD:
2840 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2841 &lt, &ht, &lv, &hv))
2842 return 0;
2843 break;
2845 case AND:
2846 lv = l1 & l2, hv = h1 & h2;
2847 break;
2849 case IOR:
2850 lv = l1 | l2, hv = h1 | h2;
2851 break;
2853 case XOR:
2854 lv = l1 ^ l2, hv = h1 ^ h2;
2855 break;
2857 case SMIN:
2858 if (h1 < h2
2859 || (h1 == h2
2860 && ((unsigned HOST_WIDE_INT) l1
2861 < (unsigned HOST_WIDE_INT) l2)))
2862 lv = l1, hv = h1;
2863 else
2864 lv = l2, hv = h2;
2865 break;
2867 case SMAX:
2868 if (h1 > h2
2869 || (h1 == h2
2870 && ((unsigned HOST_WIDE_INT) l1
2871 > (unsigned HOST_WIDE_INT) l2)))
2872 lv = l1, hv = h1;
2873 else
2874 lv = l2, hv = h2;
2875 break;
2877 case UMIN:
2878 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2879 || (h1 == h2
2880 && ((unsigned HOST_WIDE_INT) l1
2881 < (unsigned HOST_WIDE_INT) l2)))
2882 lv = l1, hv = h1;
2883 else
2884 lv = l2, hv = h2;
2885 break;
2887 case UMAX:
2888 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2889 || (h1 == h2
2890 && ((unsigned HOST_WIDE_INT) l1
2891 > (unsigned HOST_WIDE_INT) l2)))
2892 lv = l1, hv = h1;
2893 else
2894 lv = l2, hv = h2;
2895 break;
2897 case LSHIFTRT: case ASHIFTRT:
2898 case ASHIFT:
2899 case ROTATE: case ROTATERT:
2900 if (SHIFT_COUNT_TRUNCATED)
2901 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2903 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2904 return 0;
2906 if (code == LSHIFTRT || code == ASHIFTRT)
2907 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2908 code == ASHIFTRT);
2909 else if (code == ASHIFT)
2910 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2911 else if (code == ROTATE)
2912 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2913 else /* code == ROTATERT */
2914 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2915 break;
2917 default:
2918 return 0;
2921 return immed_double_const (lv, hv, mode);
2924 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2925 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2927 /* Get the integer argument values in two forms:
2928 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2930 arg0 = INTVAL (op0);
2931 arg1 = INTVAL (op1);
2933 if (width < HOST_BITS_PER_WIDE_INT)
2935 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2936 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2938 arg0s = arg0;
2939 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2940 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2942 arg1s = arg1;
2943 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2944 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2946 else
2948 arg0s = arg0;
2949 arg1s = arg1;
2952 /* Compute the value of the arithmetic. */
2954 switch (code)
2956 case PLUS:
2957 val = arg0s + arg1s;
2958 break;
2960 case MINUS:
2961 val = arg0s - arg1s;
2962 break;
2964 case MULT:
2965 val = arg0s * arg1s;
2966 break;
2968 case DIV:
2969 if (arg1s == 0
2970 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2971 && arg1s == -1))
2972 return 0;
2973 val = arg0s / arg1s;
2974 break;
2976 case MOD:
2977 if (arg1s == 0
2978 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2979 && arg1s == -1))
2980 return 0;
2981 val = arg0s % arg1s;
2982 break;
2984 case UDIV:
2985 if (arg1 == 0
2986 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2987 && arg1s == -1))
2988 return 0;
2989 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2990 break;
2992 case UMOD:
2993 if (arg1 == 0
2994 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2995 && arg1s == -1))
2996 return 0;
2997 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2998 break;
3000 case AND:
3001 val = arg0 & arg1;
3002 break;
3004 case IOR:
3005 val = arg0 | arg1;
3006 break;
3008 case XOR:
3009 val = arg0 ^ arg1;
3010 break;
3012 case LSHIFTRT:
3013 case ASHIFT:
3014 case ASHIFTRT:
3015 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3016 the value is in range. We can't return any old value for
3017 out-of-range arguments because either the middle-end (via
3018 shift_truncation_mask) or the back-end might be relying on
3019 target-specific knowledge. Nor can we rely on
3020 shift_truncation_mask, since the shift might not be part of an
3021 ashlM3, lshrM3 or ashrM3 instruction. */
3022 if (SHIFT_COUNT_TRUNCATED)
3023 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3024 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3025 return 0;
3027 val = (code == ASHIFT
3028 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3029 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3031 /* Sign-extend the result for arithmetic right shifts. */
3032 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3033 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3034 break;
3036 case ROTATERT:
3037 if (arg1 < 0)
3038 return 0;
3040 arg1 %= width;
3041 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3042 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3043 break;
3045 case ROTATE:
3046 if (arg1 < 0)
3047 return 0;
3049 arg1 %= width;
3050 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3051 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3052 break;
3054 case COMPARE:
3055 /* Do nothing here. */
3056 return 0;
3058 case SMIN:
3059 val = arg0s <= arg1s ? arg0s : arg1s;
3060 break;
3062 case UMIN:
3063 val = ((unsigned HOST_WIDE_INT) arg0
3064 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3065 break;
3067 case SMAX:
3068 val = arg0s > arg1s ? arg0s : arg1s;
3069 break;
3071 case UMAX:
3072 val = ((unsigned HOST_WIDE_INT) arg0
3073 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3074 break;
3076 case SS_PLUS:
3077 case US_PLUS:
3078 case SS_MINUS:
3079 case US_MINUS:
3080 /* ??? There are simplifications that can be done. */
3081 return 0;
3083 default:
3084 gcc_unreachable ();
3087 return gen_int_mode (val, mode);
3090 return NULL_RTX;
3095 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3096 PLUS or MINUS.
3098 Rather than test for specific cases, we do this by a brute-force method
3099 and do all possible simplifications until no more changes occur. Then
3100 we rebuild the operation. */
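/* For example, (minus (plus A B) (plus A C)) collapses to
(minus B C) once the common term A has been canceled.  */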
3102 struct simplify_plus_minus_op_data
3104 rtx op;
3105 short neg;
3106 short ix;
3109 static int
3110 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3112 const struct simplify_plus_minus_op_data *d1 = p1;
3113 const struct simplify_plus_minus_op_data *d2 = p2;
3114 int result;
3116 result = (commutative_operand_precedence (d2->op)
3117 - commutative_operand_precedence (d1->op));
3118 if (result)
3119 return result;
3120 return d1->ix - d2->ix;
3123 static rtx
3124 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3125 rtx op1)
3127 struct simplify_plus_minus_op_data ops[8];
3128 rtx result, tem;
3129 int n_ops = 2, input_ops = 2;
3130 int first, changed, canonicalized = 0;
3131 int i, j;
3133 memset (ops, 0, sizeof ops);
3135 /* Set up the two operands and then expand them until nothing has been
3136 changed. If we run out of room in our array, give up; this should
3137 almost never happen. */
3139 ops[0].op = op0;
3140 ops[0].neg = 0;
3141 ops[1].op = op1;
3142 ops[1].neg = (code == MINUS);
3146 changed = 0;
3148 for (i = 0; i < n_ops; i++)
3150 rtx this_op = ops[i].op;
3151 int this_neg = ops[i].neg;
3152 enum rtx_code this_code = GET_CODE (this_op);
3154 switch (this_code)
3156 case PLUS:
3157 case MINUS:
3158 if (n_ops == 7)
3159 return NULL_RTX;
3161 ops[n_ops].op = XEXP (this_op, 1);
3162 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3163 n_ops++;
3165 ops[i].op = XEXP (this_op, 0);
3166 input_ops++;
3167 changed = 1;
3168 canonicalized |= this_neg;
3169 break;
3171 case NEG:
3172 ops[i].op = XEXP (this_op, 0);
3173 ops[i].neg = ! this_neg;
3174 changed = 1;
3175 canonicalized = 1;
3176 break;
3178 case CONST:
3179 if (n_ops < 7
3180 && GET_CODE (XEXP (this_op, 0)) == PLUS
3181 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3182 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3184 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3185 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3186 ops[n_ops].neg = this_neg;
3187 n_ops++;
3188 changed = 1;
3189 canonicalized = 1;
3191 break;
3193 case NOT:
3194 /* ~a -> (-a - 1) */
3195 if (n_ops != 7)
3197 ops[n_ops].op = constm1_rtx;
3198 ops[n_ops++].neg = this_neg;
3199 ops[i].op = XEXP (this_op, 0);
3200 ops[i].neg = !this_neg;
3201 changed = 1;
3202 canonicalized = 1;
3204 break;
3206 case CONST_INT:
3207 if (this_neg)
3209 ops[i].op = neg_const_int (mode, this_op);
3210 ops[i].neg = 0;
3211 changed = 1;
3212 canonicalized = 1;
3214 break;
3216 default:
3217 break;
3221 while (changed);
3223 gcc_assert (n_ops >= 2);
3224 if (!canonicalized)
3226 int n_constants = 0;
3228 for (i = 0; i < n_ops; i++)
3229 if (GET_CODE (ops[i].op) == CONST_INT)
3230 n_constants++;
3232 if (n_constants <= 1)
3233 return NULL_RTX;
3236 /* If we only have two operands, we can avoid the loops. */
3237 if (n_ops == 2)
3239 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3240 rtx lhs, rhs;
3242 /* Get the two operands. Be careful with the order, especially for
3243 the cases where code == MINUS. */
3244 if (ops[0].neg && ops[1].neg)
3246 lhs = gen_rtx_NEG (mode, ops[0].op);
3247 rhs = ops[1].op;
3249 else if (ops[0].neg)
3251 lhs = ops[1].op;
3252 rhs = ops[0].op;
3254 else
3256 lhs = ops[0].op;
3257 rhs = ops[1].op;
3260 return simplify_const_binary_operation (code, mode, lhs, rhs);
3263 /* Now simplify each pair of operands until nothing changes. The first
3264 time through just simplify constants against each other. */
3266 first = 1;
3269 changed = first;
3271 for (i = 0; i < n_ops - 1; i++)
3272 for (j = i + 1; j < n_ops; j++)
3274 rtx lhs = ops[i].op, rhs = ops[j].op;
3275 int lneg = ops[i].neg, rneg = ops[j].neg;
3277 if (lhs != 0 && rhs != 0
3278 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3280 enum rtx_code ncode = PLUS;
3282 if (lneg != rneg)
3284 ncode = MINUS;
3285 if (lneg)
3286 tem = lhs, lhs = rhs, rhs = tem;
3288 else if (swap_commutative_operands_p (lhs, rhs))
3289 tem = lhs, lhs = rhs, rhs = tem;
3291 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3293 /* Reject "simplifications" that just wrap the two
3294 arguments in a CONST. Failure to do so can result
3295 in infinite recursion with simplify_binary_operation
3296 when it calls us to simplify CONST operations. */
3297 if (tem
3298 && ! (GET_CODE (tem) == CONST
3299 && GET_CODE (XEXP (tem, 0)) == ncode
3300 && XEXP (XEXP (tem, 0), 0) == lhs
3301 && XEXP (XEXP (tem, 0), 1) == rhs)
3302 /* Don't allow -x + -1 -> ~x simplifications in the
3303 first pass. This allows us the chance to combine
3304 the -1 with other constants. */
3305 && ! (first
3306 && GET_CODE (tem) == NOT
3307 && XEXP (tem, 0) == rhs))
3309 lneg &= rneg;
3310 if (GET_CODE (tem) == NEG)
3311 tem = XEXP (tem, 0), lneg = !lneg;
3312 if (GET_CODE (tem) == CONST_INT && lneg)
3313 tem = neg_const_int (mode, tem), lneg = 0;
3315 ops[i].op = tem;
3316 ops[i].neg = lneg;
3317 ops[j].op = NULL_RTX;
3318 changed = 1;
3323 first = 0;
3325 while (changed);
3327 /* Pack all the operands to the lower-numbered entries. */
3328 for (i = 0, j = 0; j < n_ops; j++)
3329 if (ops[j].op)
3331 ops[i] = ops[j];
3332 /* Stabilize sort. */
3333 ops[i].ix = i;
3334 i++;
3336 n_ops = i;
3338 /* Sort the operations based on swap_commutative_operands_p. */
3339 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3341 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3342 if (n_ops == 2
3343 && GET_CODE (ops[1].op) == CONST_INT
3344 && CONSTANT_P (ops[0].op)
3345 && ops[0].neg)
3346 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3348 /* We suppressed creation of trivial CONST expressions in the
3349 combination loop to avoid recursion. Create one manually now.
3350 The combination loop should have ensured that there is exactly
3351 one CONST_INT, and the sort will have ensured that it is last
3352 in the array and that any other constant will be next-to-last. */
3354 if (n_ops > 1
3355 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3356 && CONSTANT_P (ops[n_ops - 2].op))
3358 rtx value = ops[n_ops - 1].op;
3359 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3360 value = neg_const_int (mode, value);
3361 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3362 n_ops--;
3365 /* Put a non-negated operand first, if possible. */
3367 for (i = 0; i < n_ops && ops[i].neg; i++)
3368 continue;
3369 if (i == n_ops)
3370 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3371 else if (i != 0)
3373 tem = ops[0].op;
3374 ops[0] = ops[i];
3375 ops[i].op = tem;
3376 ops[i].neg = 1;
3379 /* Now make the result by performing the requested operations. */
3380 result = ops[0].op;
3381 for (i = 1; i < n_ops; i++)
3382 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3383 mode, result, ops[i].op);
3385 return result;
3388 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3389 static bool
3390 plus_minus_operand_p (rtx x)
3392 return GET_CODE (x) == PLUS
3393 || GET_CODE (x) == MINUS
3394 || (GET_CODE (x) == CONST
3395 && GET_CODE (XEXP (x, 0)) == PLUS
3396 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3397 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3400 /* Like simplify_binary_operation except used for relational operators.
3401 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3402 not both be VOIDmode.
3404 CMP_MODE specifies in which mode the comparison is done, so it is
3405 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3406 the operands or, if both are VOIDmode, the operands are compared in
3407 "infinite precision". */
3409 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3410 enum machine_mode cmp_mode, rtx op0, rtx op1)
3412 rtx tem, trueop0, trueop1;
3414 if (cmp_mode == VOIDmode)
3415 cmp_mode = GET_MODE (op0);
3416 if (cmp_mode == VOIDmode)
3417 cmp_mode = GET_MODE (op1);
3419 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3420 if (tem)
3422 if (SCALAR_FLOAT_MODE_P (mode))
3424 if (tem == const0_rtx)
3425 return CONST0_RTX (mode);
3426 #ifdef FLOAT_STORE_FLAG_VALUE
3428 REAL_VALUE_TYPE val;
3429 val = FLOAT_STORE_FLAG_VALUE (mode);
3430 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3432 #else
3433 return NULL_RTX;
3434 #endif
3436 if (VECTOR_MODE_P (mode))
3438 if (tem == const0_rtx)
3439 return CONST0_RTX (mode);
3440 #ifdef VECTOR_STORE_FLAG_VALUE
3442 int i, units;
3443 rtvec v;
3445 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3446 if (val == NULL_RTX)
3447 return NULL_RTX;
3448 if (val == const1_rtx)
3449 return CONST1_RTX (mode);
3451 units = GET_MODE_NUNITS (mode);
3452 v = rtvec_alloc (units);
3453 for (i = 0; i < units; i++)
3454 RTVEC_ELT (v, i) = val;
3455 return gen_rtx_raw_CONST_VECTOR (mode, v);
3457 #else
3458 return NULL_RTX;
3459 #endif
3462 return tem;
3465 /* For the following tests, ensure const0_rtx is op1. */
3466 if (swap_commutative_operands_p (op0, op1)
3467 || (op0 == const0_rtx && op1 != const0_rtx))
3468 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3470 /* If op0 is a compare, extract the comparison arguments from it. */
3471 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3472 return simplify_relational_operation (code, mode, VOIDmode,
3473 XEXP (op0, 0), XEXP (op0, 1));
3475 if (mode == VOIDmode
3476 || GET_MODE_CLASS (cmp_mode) == MODE_CC
3477 || CC0_P (op0))
3478 return NULL_RTX;
3480 trueop0 = avoid_constant_pool_reference (op0);
3481 trueop1 = avoid_constant_pool_reference (op1);
3482 return simplify_relational_operation_1 (code, mode, cmp_mode,
3483 trueop0, trueop1);
3486 /* This part of simplify_relational_operation is only used when CMP_MODE
3487 is not in class MODE_CC (i.e. it is a real comparison).
3489 MODE is the mode of the result, while CMP_MODE specifies in which
3490 mode the comparison is done, so it is the mode of the operands. */
3492 static rtx
3493 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3494 enum machine_mode cmp_mode, rtx op0, rtx op1)
3496 enum rtx_code op0code = GET_CODE (op0);
3498 if (GET_CODE (op1) == CONST_INT)
3500 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3502 /* If op0 is a comparison, extract the comparison arguments from it. */
3503 if (code == NE)
3505 if (GET_MODE (op0) == mode)
3506 return simplify_rtx (op0);
3507 else
3508 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3509 XEXP (op0, 0), XEXP (op0, 1));
3511 else if (code == EQ)
3513 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3514 if (new_code != UNKNOWN)
3515 return simplify_gen_relational (new_code, mode, VOIDmode,
3516 XEXP (op0, 0), XEXP (op0, 1));
3521 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
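/* For example, (eq (plus X (const_int 4)) (const_int 10))
becomes (eq X (const_int 6)).  */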
3522 if ((code == EQ || code == NE)
3523 && (op0code == PLUS || op0code == MINUS)
3524 && CONSTANT_P (op1)
3525 && CONSTANT_P (XEXP (op0, 1))
3526 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3528 rtx x = XEXP (op0, 0);
3529 rtx c = XEXP (op0, 1);
3531 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3532 cmp_mode, op1, c);
3533 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3536 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3537 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3538 if (code == NE
3539 && op1 == const0_rtx
3540 && GET_MODE_CLASS (mode) == MODE_INT
3541 && cmp_mode != VOIDmode
3542 /* ??? Work-around BImode bugs in the ia64 backend. */
3543 && mode != BImode
3544 && cmp_mode != BImode
3545 && nonzero_bits (op0, cmp_mode) == 1
3546 && STORE_FLAG_VALUE == 1)
3547 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3548 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3549 : lowpart_subreg (mode, op0, cmp_mode);
3551 return NULL_RTX;
3554 /* Check if the given comparison (done in the given MODE) is actually a
3555 tautology or a contradiction.
3556 If no simplification is possible, this function returns zero.
3557 Otherwise, it returns either const_true_rtx or const0_rtx. */
3560 simplify_const_relational_operation (enum rtx_code code,
3561 enum machine_mode mode,
3562 rtx op0, rtx op1)
3564 int equal, op0lt, op0ltu, op1lt, op1ltu;
3565 rtx tem;
3566 rtx trueop0;
3567 rtx trueop1;
3569 gcc_assert (mode != VOIDmode
3570 || (GET_MODE (op0) == VOIDmode
3571 && GET_MODE (op1) == VOIDmode));
3573 /* If op0 is a compare, extract the comparison arguments from it. */
3574 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3576 op1 = XEXP (op0, 1);
3577 op0 = XEXP (op0, 0);
3579 if (GET_MODE (op0) != VOIDmode)
3580 mode = GET_MODE (op0);
3581 else if (GET_MODE (op1) != VOIDmode)
3582 mode = GET_MODE (op1);
3583 else
3584 return 0;
3587 /* We can't simplify MODE_CC values since we don't know what the
3588 actual comparison is. */
3589 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3590 return 0;
3592 /* Make sure the constant is second. */
3593 if (swap_commutative_operands_p (op0, op1))
3595 tem = op0, op0 = op1, op1 = tem;
3596 code = swap_condition (code);
3599 trueop0 = avoid_constant_pool_reference (op0);
3600 trueop1 = avoid_constant_pool_reference (op1);
3602 /* For integer comparisons of A and B maybe we can simplify A - B and can
3603 then simplify a comparison of that with zero. If A and B are both either
3604 a register or a CONST_INT, this can't help; testing for these cases will
3605 prevent infinite recursion here and speed things up.
3607 If CODE is an unsigned comparison, then we can never do this optimization,
3608 because it gives an incorrect result if the subtraction wraps around zero.
3609 ANSI C defines unsigned operations such that they never overflow, and
3610 thus such cases can not be ignored; but we cannot do it even for
3611 signed comparisons for languages such as Java, so test flag_wrapv. */
3613 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3614 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3615 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3616 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3617 /* We cannot do this for == or != if tem is a nonzero address. */
3618 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3619 && code != GTU && code != GEU && code != LTU && code != LEU)
3620 return simplify_const_relational_operation (signed_condition (code),
3621 mode, tem, const0_rtx);
3623 if (flag_unsafe_math_optimizations && code == ORDERED)
3624 return const_true_rtx;
3626 if (flag_unsafe_math_optimizations && code == UNORDERED)
3627 return const0_rtx;
3629 /* For modes without NaNs, if the two operands are equal, we know the
3630 result except if they have side-effects. */
3631 if (! HONOR_NANS (GET_MODE (trueop0))
3632 && rtx_equal_p (trueop0, trueop1)
3633 && ! side_effects_p (trueop0))
3634 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3636 /* If the operands are floating-point constants, see if we can fold
3637 the result. */
3638 else if (GET_CODE (trueop0) == CONST_DOUBLE
3639 && GET_CODE (trueop1) == CONST_DOUBLE
3640 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3642 REAL_VALUE_TYPE d0, d1;
3644 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3645 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3647 /* Comparisons are unordered iff at least one of the values is NaN. */
3648 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3649 switch (code)
3651 case UNEQ:
3652 case UNLT:
3653 case UNGT:
3654 case UNLE:
3655 case UNGE:
3656 case NE:
3657 case UNORDERED:
3658 return const_true_rtx;
3659 case EQ:
3660 case LT:
3661 case GT:
3662 case LE:
3663 case GE:
3664 case LTGT:
3665 case ORDERED:
3666 return const0_rtx;
3667 default:
3668 return 0;
3671 equal = REAL_VALUES_EQUAL (d0, d1);
3672 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3673 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3676 /* Otherwise, see if the operands are both integers. */
3677 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3678 && (GET_CODE (trueop0) == CONST_DOUBLE
3679 || GET_CODE (trueop0) == CONST_INT)
3680 && (GET_CODE (trueop1) == CONST_DOUBLE
3681 || GET_CODE (trueop1) == CONST_INT))
3683 int width = GET_MODE_BITSIZE (mode);
3684 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3685 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3687 /* Get the two words comprising each integer constant. */
3688 if (GET_CODE (trueop0) == CONST_DOUBLE)
3690 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3691 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3693 else
3695 l0u = l0s = INTVAL (trueop0);
3696 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3699 if (GET_CODE (trueop1) == CONST_DOUBLE)
3701 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3702 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3704 else
3706 l1u = l1s = INTVAL (trueop1);
3707 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3710 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3711 we have to sign or zero-extend the values. */
3712 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3714 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3715 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3717 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3718 l0s |= ((HOST_WIDE_INT) (-1) << width);
3720 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3721 l1s |= ((HOST_WIDE_INT) (-1) << width);
3723 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3724 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3726 equal = (h0u == h1u && l0u == l1u);
3727 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3728 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3729 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3730 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3733 /* Otherwise, there are some code-specific tests we can make. */
3734 else
3736 /* Optimize comparisons with upper and lower bounds. */
3737 if (SCALAR_INT_MODE_P (mode)
3738 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3740 rtx mmin, mmax;
3741 int sign;
3743 if (code == GEU
3744 || code == LEU
3745 || code == GTU
3746 || code == LTU)
3747 sign = 0;
3748 else
3749 sign = 1;
3751 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3753 tem = NULL_RTX;
3754 switch (code)
3756 case GEU:
3757 case GE:
3758 /* x >= min is always true. */
3759 if (rtx_equal_p (trueop1, mmin))
3760 tem = const_true_rtx;
3761 else
3762 break;
3764 case LEU:
3765 case LE:
3766 /* x <= max is always true. */
3767 if (rtx_equal_p (trueop1, mmax))
3768 tem = const_true_rtx;
3769 break;
3771 case GTU:
3772 case GT:
3773 /* x > max is always false. */
3774 if (rtx_equal_p (trueop1, mmax))
3775 tem = const0_rtx;
3776 break;
3778 case LTU:
3779 case LT:
3780 /* x < min is always false. */
3781 if (rtx_equal_p (trueop1, mmin))
3782 tem = const0_rtx;
3783 break;
3785 default:
3786 break;
3788 if (tem == const0_rtx
3789 || tem == const_true_rtx)
3790 return tem;
3793 switch (code)
3795 case EQ:
3796 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3797 return const0_rtx;
3798 break;
3800 case NE:
3801 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3802 return const_true_rtx;
3803 break;
3805 case LT:
3806 /* Optimize abs(x) < 0.0. */
3807 if (trueop1 == CONST0_RTX (mode)
3808 && !HONOR_SNANS (mode)
3809 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3811 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3812 : trueop0;
3813 if (GET_CODE (tem) == ABS)
3814 return const0_rtx;
3816 break;
3818 case GE:
3819 /* Optimize abs(x) >= 0.0. */
3820 if (trueop1 == CONST0_RTX (mode)
3821 && !HONOR_NANS (mode)
3822 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3824 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3825 : trueop0;
3826 if (GET_CODE (tem) == ABS)
3827 return const_true_rtx;
3829 break;
3831 case UNGE:
3832 /* Optimize ! (abs(x) < 0.0). */
3833 if (trueop1 == CONST0_RTX (mode))
3835 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3836 : trueop0;
3837 if (GET_CODE (tem) == ABS)
3838 return const_true_rtx;
3840 break;
3842 default:
3843 break;
3846 return 0;
3849 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3850 as appropriate. */
3851 switch (code)
3853 case EQ:
3854 case UNEQ:
3855 return equal ? const_true_rtx : const0_rtx;
3856 case NE:
3857 case LTGT:
3858 return ! equal ? const_true_rtx : const0_rtx;
3859 case LT:
3860 case UNLT:
3861 return op0lt ? const_true_rtx : const0_rtx;
3862 case GT:
3863 case UNGT:
3864 return op1lt ? const_true_rtx : const0_rtx;
3865 case LTU:
3866 return op0ltu ? const_true_rtx : const0_rtx;
3867 case GTU:
3868 return op1ltu ? const_true_rtx : const0_rtx;
3869 case LE:
3870 case UNLE:
3871 return equal || op0lt ? const_true_rtx : const0_rtx;
3872 case GE:
3873 case UNGE:
3874 return equal || op1lt ? const_true_rtx : const0_rtx;
3875 case LEU:
3876 return equal || op0ltu ? const_true_rtx : const0_rtx;
3877 case GEU:
3878 return equal || op1ltu ? const_true_rtx : const0_rtx;
3879 case ORDERED:
3880 return const_true_rtx;
3881 case UNORDERED:
3882 return const0_rtx;
3883 default:
3884 gcc_unreachable ();
3888 /* Simplify CODE, an operation with result mode MODE and three operands,
3889 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3890 a constant. Return 0 if no simplification is possible. */
3893 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3894 enum machine_mode op0_mode, rtx op0, rtx op1,
3895 rtx op2)
3897 unsigned int width = GET_MODE_BITSIZE (mode);
3899 /* VOIDmode means "infinite" precision. */
3900 if (width == 0)
3901 width = HOST_BITS_PER_WIDE_INT;
3903 switch (code)
3905 case SIGN_EXTRACT:
3906 case ZERO_EXTRACT:
3907 if (GET_CODE (op0) == CONST_INT
3908 && GET_CODE (op1) == CONST_INT
3909 && GET_CODE (op2) == CONST_INT
3910 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3911 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3913 /* Extracting a bit-field from a constant */
3914 HOST_WIDE_INT val = INTVAL (op0);
3916 if (BITS_BIG_ENDIAN)
3917 val >>= (GET_MODE_BITSIZE (op0_mode)
3918 - INTVAL (op2) - INTVAL (op1));
3919 else
3920 val >>= INTVAL (op2);
3922 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3924 /* First zero-extend. */
3925 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3926 /* If desired, propagate sign bit. */
3927 if (code == SIGN_EXTRACT
3928 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3929 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3932 /* Clear the bits that don't belong in our mode,
3933 unless they and our sign bit are all one.
3934 So we get either a reasonable negative value or a reasonable
3935 unsigned value for this mode. */
3936 if (width < HOST_BITS_PER_WIDE_INT
3937 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3938 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3939 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3941 return gen_int_mode (val, mode);
3943 break;
3945 case IF_THEN_ELSE:
3946 if (GET_CODE (op0) == CONST_INT)
3947 return op0 != const0_rtx ? op1 : op2;
3949 /* Convert c ? a : a into "a". */
3950 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3951 return op1;
3953 /* Convert a != b ? a : b into "a". */
3954 if (GET_CODE (op0) == NE
3955 && ! side_effects_p (op0)
3956 && ! HONOR_NANS (mode)
3957 && ! HONOR_SIGNED_ZEROS (mode)
3958 && ((rtx_equal_p (XEXP (op0, 0), op1)
3959 && rtx_equal_p (XEXP (op0, 1), op2))
3960 || (rtx_equal_p (XEXP (op0, 0), op2)
3961 && rtx_equal_p (XEXP (op0, 1), op1))))
3962 return op1;
3964 /* Convert a == b ? a : b into "b". */
3965 if (GET_CODE (op0) == EQ
3966 && ! side_effects_p (op0)
3967 && ! HONOR_NANS (mode)
3968 && ! HONOR_SIGNED_ZEROS (mode)
3969 && ((rtx_equal_p (XEXP (op0, 0), op1)
3970 && rtx_equal_p (XEXP (op0, 1), op2))
3971 || (rtx_equal_p (XEXP (op0, 0), op2)
3972 && rtx_equal_p (XEXP (op0, 1), op1))))
3973 return op2;
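      /* For instance (an illustrative case), (if_then_else (ne (reg X)
	 (reg Y)) (reg X) (reg Y)) and (if_then_else (eq (reg X) (reg Y))
	 (reg Y) (reg X)) both reduce to (reg X), provided the mode need not
	 honor NaNs or signed zeros.  */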
3975 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3977 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3978 ? GET_MODE (XEXP (op0, 1))
3979 : GET_MODE (XEXP (op0, 0)));
3980 rtx temp;
3982 /* Look for happy constants (STORE_FLAG_VALUE and zero) in op1 and op2. */
3983 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3985 HOST_WIDE_INT t = INTVAL (op1);
3986 HOST_WIDE_INT f = INTVAL (op2);
3988 if (t == STORE_FLAG_VALUE && f == 0)
3989 code = GET_CODE (op0);
3990 else if (t == 0 && f == STORE_FLAG_VALUE)
3992 enum rtx_code tmp;
3993 tmp = reversed_comparison_code (op0, NULL_RTX);
3994 if (tmp == UNKNOWN)
3995 break;
3996 code = tmp;
3998 else
3999 break;
4001 return simplify_gen_relational (code, mode, cmp_mode,
4002 XEXP (op0, 0), XEXP (op0, 1));
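	  /* E.g. (a sketch, assuming STORE_FLAG_VALUE == 1):
	     (if_then_else (lt (reg X) (reg Y)) (const_int 1) (const_int 0))
	     becomes (lt (reg X) (reg Y)); with the constants swapped, the
	     reversed comparison (ge (reg X) (reg Y)) is produced, when
	     reversing is known to be safe.  */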
4005 if (cmp_mode == VOIDmode)
4006 cmp_mode = op0_mode;
4007 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4008 cmp_mode, XEXP (op0, 0),
4009 XEXP (op0, 1));
4011 /* See if any simplifications were possible. */
4012 if (temp)
4014 if (GET_CODE (temp) == CONST_INT)
4015 return temp == const0_rtx ? op2 : op1;
4016 else if (temp)
4017 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4020 break;
4022 case VEC_MERGE:
4023 gcc_assert (GET_MODE (op0) == mode);
4024 gcc_assert (GET_MODE (op1) == mode);
4025 gcc_assert (VECTOR_MODE_P (mode));
4026 op2 = avoid_constant_pool_reference (op2);
4027 if (GET_CODE (op2) == CONST_INT)
4029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4031 int mask = (1 << n_elts) - 1;
4033 if (!(INTVAL (op2) & mask))
4034 return op1;
4035 if ((INTVAL (op2) & mask) == mask)
4036 return op0;
4038 op0 = avoid_constant_pool_reference (op0);
4039 op1 = avoid_constant_pool_reference (op1);
4040 if (GET_CODE (op0) == CONST_VECTOR
4041 && GET_CODE (op1) == CONST_VECTOR)
4043 rtvec v = rtvec_alloc (n_elts);
4044 unsigned int i;
4046 for (i = 0; i < n_elts; i++)
4047 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4048 ? CONST_VECTOR_ELT (op0, i)
4049 : CONST_VECTOR_ELT (op1, i));
4050 return gen_rtx_CONST_VECTOR (mode, v);
4053 break;
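      /* Illustrative example (hypothetical vectors): for a four-element
	 vector mode, (vec_merge A B (const_int 5)) takes elements 0 and 2
	 from A and elements 1 and 3 from B; masks of 0 or 0xf select B or A
	 outright, and two CONST_VECTOR operands are merged element by
	 element into a new CONST_VECTOR.  */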
4055 default:
4056 gcc_unreachable ();
4059 return 0;
4062 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4063 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4065 Works by unpacking OP into a collection of 8-bit values
4066 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4067 and then repacking them for OUTERMODE. */
4069 static rtx
4070 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4071 enum machine_mode innermode, unsigned int byte)
4073 /* We support up to 512-bit values (for V8DFmode). */
4074 enum {
4075 max_bitsize = 512,
4076 value_bit = 8,
4077 value_mask = (1 << value_bit) - 1
4079 unsigned char value[max_bitsize / value_bit];
4080 int value_start;
4081 int i;
4082 int elem;
4084 int num_elem;
4085 rtx * elems;
4086 int elem_bitsize;
4087 rtx result_s;
4088 rtvec result_v = NULL;
4089 enum mode_class outer_class;
4090 enum machine_mode outer_submode;
4092 /* Some ports misuse CCmode. */
4093 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4094 return op;
4096 /* We have no way to represent a complex constant at the rtl level. */
4097 if (COMPLEX_MODE_P (outermode))
4098 return NULL_RTX;
4100 /* Unpack the value. */
4102 if (GET_CODE (op) == CONST_VECTOR)
4104 num_elem = CONST_VECTOR_NUNITS (op);
4105 elems = &CONST_VECTOR_ELT (op, 0);
4106 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4108 else
4110 num_elem = 1;
4111 elems = &op;
4112 elem_bitsize = max_bitsize;
4114 /* If this asserts, it is too complicated; reducing value_bit may help. */
4115 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4116 /* I don't know how to handle endianness of sub-units. */
4117 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4119 for (elem = 0; elem < num_elem; elem++)
4121 unsigned char * vp;
4122 rtx el = elems[elem];
4124 /* Vectors are kept in target memory order. (This is probably
4125 a mistake.) */
4127 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4128 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4129 / BITS_PER_UNIT);
4130 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4131 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4132 unsigned bytele = (subword_byte % UNITS_PER_WORD
4133 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4134 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
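      /* Worked example (hypothetical layout): for a two-element vector with
	 elem_bitsize == 32, UNITS_PER_WORD == 4 and both WORDS_BIG_ENDIAN
	 and BYTES_BIG_ENDIAN set, element 0 has ibyte == 4 and so lands at
	 bytes 4..7 of VALUE, i.e. the most-significant half, reflecting
	 target memory order.  */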
4137 switch (GET_CODE (el))
4139 case CONST_INT:
4140 for (i = 0;
4141 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4142 i += value_bit)
4143 *vp++ = INTVAL (el) >> i;
4144 /* CONST_INTs are always logically sign-extended. */
4145 for (; i < elem_bitsize; i += value_bit)
4146 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4147 break;
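	  /* E.g. (illustrative, value_bit == 8): (const_int -2) for a
	     16-bit element unpacks to the bytes 0xfe, 0xff -- the low
	     byte first, then sign-extended high bytes.  */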
4149 case CONST_DOUBLE:
4150 if (GET_MODE (el) == VOIDmode)
4152 /* If this triggers, someone should have generated a
4153 CONST_INT instead. */
4154 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4156 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4157 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4158 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4160 *vp++
4161 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4162 i += value_bit;
4164 /* It shouldn't matter what's done here, so fill it with
4165 zero. */
4166 for (; i < elem_bitsize; i += value_bit)
4167 *vp++ = 0;
4169 else
4171 long tmp[max_bitsize / 32];
4172 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4174 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4175 gcc_assert (bitsize <= elem_bitsize);
4176 gcc_assert (bitsize % value_bit == 0);
4178 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4179 GET_MODE (el));
4181 /* real_to_target produces its result in words affected by
4182 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4183 and use WORDS_BIG_ENDIAN instead; see the documentation
4184 of SUBREG in rtl.texi. */
4185 for (i = 0; i < bitsize; i += value_bit)
4187 int ibase;
4188 if (WORDS_BIG_ENDIAN)
4189 ibase = bitsize - 1 - i;
4190 else
4191 ibase = i;
4192 *vp++ = tmp[ibase / 32] >> i % 32;
4195 /* It shouldn't matter what's done here, so fill it with
4196 zero. */
4197 for (; i < elem_bitsize; i += value_bit)
4198 *vp++ = 0;
4200 break;
4202 default:
4203 gcc_unreachable ();
4207 /* Now, pick the right byte to start with. */
4208 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4209 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4210 will already have offset 0. */
4211 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4213 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4214 - byte);
4215 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4216 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4217 byte = (subword_byte % UNITS_PER_WORD
4218 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
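      /* Worked example (hypothetical modes): extracting SImode from DImode
	 at SUBREG_BYTE 4 on a fully big-endian target gives ibyte == 0, so
	 BYTE is renumbered to 0 -- the least-significant word -- before the
	 little-endian VALUE array is indexed.  */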
4221 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4222 so if it's become negative it will instead be very large.) */
4223 gcc_assert (byte < GET_MODE_SIZE (innermode));
4225 /* Convert from bytes to chunks of size value_bit. */
4226 value_start = byte * (BITS_PER_UNIT / value_bit);
4228 /* Re-pack the value. */
4230 if (VECTOR_MODE_P (outermode))
4232 num_elem = GET_MODE_NUNITS (outermode);
4233 result_v = rtvec_alloc (num_elem);
4234 elems = &RTVEC_ELT (result_v, 0);
4235 outer_submode = GET_MODE_INNER (outermode);
4237 else
4239 num_elem = 1;
4240 elems = &result_s;
4241 outer_submode = outermode;
4244 outer_class = GET_MODE_CLASS (outer_submode);
4245 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4247 gcc_assert (elem_bitsize % value_bit == 0);
4248 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4250 for (elem = 0; elem < num_elem; elem++)
4252 unsigned char *vp;
4254 /* Vectors are stored in target memory order. (This is probably
4255 a mistake.) */
4257 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4258 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4259 / BITS_PER_UNIT);
4260 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4261 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4262 unsigned bytele = (subword_byte % UNITS_PER_WORD
4263 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4264 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4267 switch (outer_class)
4269 case MODE_INT:
4270 case MODE_PARTIAL_INT:
4272 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4274 for (i = 0;
4275 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4276 i += value_bit)
4277 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4278 for (; i < elem_bitsize; i += value_bit)
4279 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4280 << (i - HOST_BITS_PER_WIDE_INT));
4282 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4283 know why. */
4284 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4285 elems[elem] = gen_int_mode (lo, outer_submode);
4286 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4287 elems[elem] = immed_double_const (lo, hi, outer_submode);
4288 else
4289 return NULL_RTX;
4291 break;
4293 case MODE_FLOAT:
4294 case MODE_DECIMAL_FLOAT:
4296 REAL_VALUE_TYPE r;
4297 long tmp[max_bitsize / 32];
4299 /* real_from_target wants its input in words affected by
4300 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4301 and use WORDS_BIG_ENDIAN instead; see the documentation
4302 of SUBREG in rtl.texi. */
4303 for (i = 0; i < max_bitsize / 32; i++)
4304 tmp[i] = 0;
4305 for (i = 0; i < elem_bitsize; i += value_bit)
4307 int ibase;
4308 if (WORDS_BIG_ENDIAN)
4309 ibase = elem_bitsize - 1 - i;
4310 else
4311 ibase = i;
4312 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4315 real_from_target (&r, tmp, outer_submode);
4316 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4318 break;
4320 default:
4321 gcc_unreachable ();
4324 if (VECTOR_MODE_P (outermode))
4325 return gen_rtx_CONST_VECTOR (outermode, result_v);
4326 else
4327 return result_s;
4330 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4331 Return 0 if no simplifications are possible. */
4333 simplify_subreg (enum machine_mode outermode, rtx op,
4334 enum machine_mode innermode, unsigned int byte)
4336 /* Little bit of sanity checking. */
4337 gcc_assert (innermode != VOIDmode);
4338 gcc_assert (outermode != VOIDmode);
4339 gcc_assert (innermode != BLKmode);
4340 gcc_assert (outermode != BLKmode);
4342 gcc_assert (GET_MODE (op) == innermode
4343 || GET_MODE (op) == VOIDmode);
4345 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4346 gcc_assert (byte < GET_MODE_SIZE (innermode));
4348 if (outermode == innermode && !byte)
4349 return op;
4351 if (GET_CODE (op) == CONST_INT
4352 || GET_CODE (op) == CONST_DOUBLE
4353 || GET_CODE (op) == CONST_VECTOR)
4354 return simplify_immed_subreg (outermode, op, innermode, byte);
4356 /* Changing mode twice with SUBREG => just change it once,
4357 or not at all if changing back to op's starting mode. */
4358 if (GET_CODE (op) == SUBREG)
4360 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4361 int final_offset = byte + SUBREG_BYTE (op);
4362 rtx newx;
4364 if (outermode == innermostmode
4365 && byte == 0 && SUBREG_BYTE (op) == 0)
4366 return SUBREG_REG (op);
4368 /* The SUBREG_BYTE represents the offset, as if the value were stored
4369 in memory.  An irritating exception is the paradoxical subreg, where
4370 we define SUBREG_BYTE to be 0; on big-endian machines this value
4371 would otherwise be negative.  For a moment, undo this exception. */
4372 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4374 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4375 if (WORDS_BIG_ENDIAN)
4376 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4377 if (BYTES_BIG_ENDIAN)
4378 final_offset += difference % UNITS_PER_WORD;
4380 if (SUBREG_BYTE (op) == 0
4381 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4383 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4384 if (WORDS_BIG_ENDIAN)
4385 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4386 if (BYTES_BIG_ENDIAN)
4387 final_offset += difference % UNITS_PER_WORD;
4390 /* See whether resulting subreg will be paradoxical. */
4391 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4393 /* In nonparadoxical subregs we can't handle negative offsets. */
4394 if (final_offset < 0)
4395 return NULL_RTX;
4396 /* Bail out in case resulting subreg would be incorrect. */
4397 if (final_offset % GET_MODE_SIZE (outermode)
4398 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4399 return NULL_RTX;
4401 else
4403 int offset = 0;
4404 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4406 /* In a paradoxical subreg, see if we are still looking at the lower part.
4407 If so, our SUBREG_BYTE will be 0. */
4408 if (WORDS_BIG_ENDIAN)
4409 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4410 if (BYTES_BIG_ENDIAN)
4411 offset += difference % UNITS_PER_WORD;
4412 if (offset == final_offset)
4413 final_offset = 0;
4414 else
4415 return NULL_RTX;
4418 /* Recurse for further possible simplifications. */
4419 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4420 final_offset);
4421 if (newx)
4422 return newx;
4423 if (validate_subreg (outermode, innermostmode,
4424 SUBREG_REG (op), final_offset))
4425 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4426 return NULL_RTX;
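  /* For instance (an illustrative case), (subreg:QI (subreg:HI (reg:SI X) 0) 0)
     recurses into simplify_subreg (QImode, (reg:SI X), SImode, 0) and, failing
     that, is rebuilt as the single (subreg:QI (reg:SI X) 0); whereas
     (subreg:SI (subreg:HI (reg:SI X) 0) 0) simply returns (reg:SI X).  */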
4429 /* Merge implicit and explicit truncations. */
4431 if (GET_CODE (op) == TRUNCATE
4432 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4433 && subreg_lowpart_offset (outermode, innermode) == byte)
4434 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4435 GET_MODE (XEXP (op, 0)));
4437 /* SUBREG of a hard register => just change the register number
4438 and/or mode. If the hard register is not valid in that mode,
4439 suppress this simplification. If the hard register is the stack,
4440 frame, or argument pointer, leave this as a SUBREG. */
4442 if (REG_P (op)
4443 && REGNO (op) < FIRST_PSEUDO_REGISTER
4444 #ifdef CANNOT_CHANGE_MODE_CLASS
4445 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4446 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4447 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4448 #endif
4449 && ((reload_completed && !frame_pointer_needed)
4450 || (REGNO (op) != FRAME_POINTER_REGNUM
4451 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4452 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4453 #endif
4455 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4456 && REGNO (op) != ARG_POINTER_REGNUM
4457 #endif
4458 && REGNO (op) != STACK_POINTER_REGNUM
4459 && subreg_offset_representable_p (REGNO (op), innermode,
4460 byte, outermode))
4462 unsigned int regno = REGNO (op);
4463 unsigned int final_regno
4464 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4466 /* ??? We do allow it if the current REG is not valid for
4467 its mode. This is a kludge to work around how float/complex
4468 arguments are passed on 32-bit SPARC and should be fixed. */
4469 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4470 || ! HARD_REGNO_MODE_OK (regno, innermode))
4472 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4474 /* Propagate the original regno.  We don't have any way to specify
4475 the offset inside the original regno, so do so only for the lowpart.
4476 The information is used only by alias analysis, which cannot
4477 grok partial registers anyway. */
4479 if (subreg_lowpart_offset (outermode, innermode) == byte)
4480 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4481 return x;
4485 /* If we have a SUBREG of a register that we are replacing and we are
4486 replacing it with a MEM, make a new MEM and try replacing the
4487 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4488 or if we would be widening it. */
4490 if (MEM_P (op)
4491 && ! mode_dependent_address_p (XEXP (op, 0))
4492 /* Allow splitting of volatile memory references in case we don't
4493 have an instruction to move the whole thing. */
4494 && (! MEM_VOLATILE_P (op)
4495 || ! have_insn_for (SET, innermode))
4496 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4497 return adjust_address_nv (op, outermode, byte);
4499 /* Handle complex values represented as CONCAT
4500 of real and imaginary part. */
4501 if (GET_CODE (op) == CONCAT)
4503 unsigned int inner_size, final_offset;
4504 rtx part, res;
4506 inner_size = GET_MODE_UNIT_SIZE (innermode);
4507 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4508 final_offset = byte % inner_size;
4509 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4510 return NULL_RTX;
4512 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4513 if (res)
4514 return res;
4515 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4516 return gen_rtx_SUBREG (outermode, part, final_offset);
4517 return NULL_RTX;
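  /* Illustrative example (hypothetical operands): with 4-byte SFmode parts,
     (subreg:SF (concat:SC RE IM) 0) selects RE and
     (subreg:SF (concat:SC RE IM) 4) selects IM, since BYTE is reduced modulo
     the 4-byte unit size before recursing on the chosen part.  */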
4520 /* Optimize SUBREG truncations of zero and sign extended values. */
4521 if ((GET_CODE (op) == ZERO_EXTEND
4522 || GET_CODE (op) == SIGN_EXTEND)
4523 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4525 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4527 /* If we're requesting the lowpart of a zero or sign extension,
4528 there are three possibilities. If the outermode is the same
4529 as the origmode, we can omit both the extension and the subreg.
4530 If the outermode is not larger than the origmode, we can apply
4531 the truncation without the extension. Finally, if the outermode
4532 is larger than the origmode, but both are integer modes, we
4533 can just extend to the appropriate mode. */
4534 if (bitpos == 0)
4536 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4537 if (outermode == origmode)
4538 return XEXP (op, 0);
4539 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4540 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4541 subreg_lowpart_offset (outermode,
4542 origmode));
4543 if (SCALAR_INT_MODE_P (outermode))
4544 return simplify_gen_unary (GET_CODE (op), outermode,
4545 XEXP (op, 0), origmode);
4548 /* A SUBREG resulting from a zero extension may fold to zero if
4549 it extracts higher bits than the ZERO_EXTEND's source provides. */
4550 if (GET_CODE (op) == ZERO_EXTEND
4551 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4552 return CONST0_RTX (outermode);
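  /* For example (an illustrative lowpart case), (subreg:QI
     (zero_extend:SI (reg:QI X)) 0) on a little-endian target has
     bitpos == 0 and origmode == outermode, so it reduces to (reg:QI X);
     asking instead for bits entirely above the QImode source of the
     ZERO_EXTEND yields (const_int 0).  */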
4555 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4556 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4557 the outer subreg is effectively a truncation to the original mode. */
4558 if ((GET_CODE (op) == LSHIFTRT
4559 || GET_CODE (op) == ASHIFTRT)
4560 && SCALAR_INT_MODE_P (outermode)
4561 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4562 to avoid the possibility that an outer LSHIFTRT shifts by more
4563 than the sign extension's sign_bit_copies and introduces zeros
4564 into the high bits of the result. */
4565 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4566 && GET_CODE (XEXP (op, 1)) == CONST_INT
4567 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4568 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4569 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4570 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4571 return simplify_gen_binary (ASHIFTRT, outermode,
4572 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4574 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4575 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4576 the outer subreg is effectively a truncation to the original mode. */
4577 if ((GET_CODE (op) == LSHIFTRT
4578 || GET_CODE (op) == ASHIFTRT)
4579 && SCALAR_INT_MODE_P (outermode)
4580 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4581 && GET_CODE (XEXP (op, 1)) == CONST_INT
4582 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4583 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4584 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4585 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4586 return simplify_gen_binary (LSHIFTRT, outermode,
4587 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4589 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4590 (ashift:QI (x:QI) C), where C is a suitable small constant and
4591 the outer subreg is effectively a truncation to the original mode. */
4592 if (GET_CODE (op) == ASHIFT
4593 && SCALAR_INT_MODE_P (outermode)
4594 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4595 && GET_CODE (XEXP (op, 1)) == CONST_INT
4596 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4597 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4598 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4599 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4600 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4601 return simplify_gen_binary (ASHIFT, outermode,
4602 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4604 return NULL_RTX;
4607 /* Make a SUBREG operation or equivalent if it folds. */
4610 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4611 enum machine_mode innermode, unsigned int byte)
4613 rtx newx;
4615 newx = simplify_subreg (outermode, op, innermode, byte);
4616 if (newx)
4617 return newx;
4619 if (GET_CODE (op) == SUBREG
4620 || GET_CODE (op) == CONCAT
4621 || GET_MODE (op) == VOIDmode)
4622 return NULL_RTX;
4624 if (validate_subreg (outermode, innermode, op, byte))
4625 return gen_rtx_SUBREG (outermode, op, byte);
4627 return NULL_RTX;
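/* Usage sketch (a hypothetical caller, not taken from any particular pass):
   a pass wanting the SImode word at byte offset 0 of a DImode value X might
   write

	part = simplify_gen_subreg (SImode, x, DImode, 0);

   For a CONST_INT or CONST_DOUBLE X this folds via simplify_immed_subreg;
   for a suitable hard register it folds to a REG; otherwise a plain
   (subreg:SI (x:DI) 0) is generated when validate_subreg allows it.  */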
4630 /* Simplify X, an rtx expression.
4632 Return the simplified expression or NULL if no simplifications
4633 were possible.
4635 This is the preferred entry point into the simplification routines;
4636 however, we still allow passes to call the more specific routines.
4638 Right now GCC has three (yes, three) major bodies of RTL simplification
4639 code that need to be unified.
4641 1. fold_rtx in cse.c. This code uses various CSE specific
4642 information to aid in RTL simplification.
4644 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4645 it uses combine specific information to aid in RTL
4646 simplification.
4648 3. The routines in this file.
4651 Long term we want to only have one body of simplification code; to
4652 get to that state I recommend the following steps:
4654 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4655 which do not depend on pass-specific state into these routines.
4657 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4658 use this routine whenever possible.
4660 3. Allow for pass dependent state to be provided to these
4661 routines and add simplifications based on the pass dependent
4662 state. Remove code from cse.c & combine.c that becomes
4663 redundant/dead.
4665 It will take time, but ultimately the compiler will be easier to
4666 maintain and improve. It's totally silly that when we add a
4667 simplification it needs to be added in 4 places (3 for RTL
4668 simplification and 1 for tree simplification). */
4671 simplify_rtx (rtx x)
4673 enum rtx_code code = GET_CODE (x);
4674 enum machine_mode mode = GET_MODE (x);
4676 switch (GET_RTX_CLASS (code))
4678 case RTX_UNARY:
4679 return simplify_unary_operation (code, mode,
4680 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4681 case RTX_COMM_ARITH:
4682 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4683 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4685 /* Fall through.... */
4687 case RTX_BIN_ARITH:
4688 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4690 case RTX_TERNARY:
4691 case RTX_BITFIELD_OPS:
4692 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4693 XEXP (x, 0), XEXP (x, 1),
4694 XEXP (x, 2));
4696 case RTX_COMPARE:
4697 case RTX_COMM_COMPARE:
4698 return simplify_relational_operation (code, mode,
4699 ((GET_MODE (XEXP (x, 0))
4700 != VOIDmode)
4701 ? GET_MODE (XEXP (x, 0))
4702 : GET_MODE (XEXP (x, 1))),
4703 XEXP (x, 0),
4704 XEXP (x, 1));
4706 case RTX_EXTRA:
4707 if (code == SUBREG)
4708 return simplify_gen_subreg (mode, SUBREG_REG (x),
4709 GET_MODE (SUBREG_REG (x)),
4710 SUBREG_BYTE (x));
4711 break;
4713 case RTX_OBJ:
4714 if (code == LO_SUM)
4716 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4717 if (GET_CODE (XEXP (x, 0)) == HIGH
4718 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4719 return XEXP (x, 1);
4721 break;
4723 default:
4724 break;
4726 return NULL;