gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
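/* A minimal illustrative sketch (not part of the original file) of how
   HWI_SIGN_EXTEND is meant to be used: widening a single CONST_INT into
   the (low, high) pair described above.  The helper name is hypothetical.  */
#if 0
static void
example_widen_const_int (rtx x, unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *high)
{
  *low = INTVAL (x);		   /* Low word, viewed as unsigned.  */
  *high = HWI_SIGN_EXTEND (*low);  /* -1 if the low word looks negative, else 0.  */
}
#endif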
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
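/* A minimal illustrative sketch (not part of the original file), assuming a
   32-bit SImode target: mode_signbit_p accepts only the constant whose sole
   set bit is the sign bit.  The helper name is hypothetical.  */
#if 0
static void
example_mode_signbit (void)
{
  rtx signbit = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  gcc_assert (mode_signbit_p (SImode, signbit));	/* 0x80000000  */
  gcc_assert (!mode_signbit_p (SImode, const1_rtx));	/* plain 1  */
}
#endif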
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
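/* A minimal illustrative sketch (not part of the original file): the routine
   above either folds outright or at least returns a canonically ordered rtx.
   The helper name and register number are arbitrary.  */
#if 0
static void
example_gen_binary (void)
{
  /* Two constants fold immediately to (const_int 5).  */
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  /* With a non-constant operand the constant ends up second, i.e.
     (plus (reg:SI 100) (const_int 3)).  */
  rtx canon = simplify_gen_binary (PLUS, SImode, GEN_INT (3),
				   gen_rtx_REG (SImode, 100));
}
#endif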
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
205 /* Return true if X is a MEM referencing the constant pool. */
207 bool
208 constant_pool_reference_p (rtx x)
210 return avoid_constant_pool_reference (x) != x;
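/* A minimal illustrative sketch (not part of the original file): a MEM built
   by force_const_mem refers to the constant pool, so the two routines above
   should see through it.  The helper name and the use of a small integer
   constant are only for illustration.  */
#if 0
static void
example_constant_pool (void)
{
  rtx cst = GEN_INT (42);
  rtx mem = force_const_mem (SImode, cst);	/* (mem (symbol_ref ...))  */
  rtx back = avoid_constant_pool_reference (mem);
  gcc_assert (constant_pool_reference_p (mem));
  /* back is expected to be (const_int 42) again.  */
}
#endif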
213 /* Make a unary operation by first seeing if it folds and otherwise making
214 the specified operation. */
216 rtx
217 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
218 enum machine_mode op_mode)
220 rtx tem;
222 /* If this simplifies, use it. */
223 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
224 return tem;
226 return gen_rtx_fmt_e (code, mode, op);
229 /* Likewise for ternary operations. */
231 rtx
232 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
233 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
235 rtx tem;
237 /* If this simplifies, use it. */
238 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
239 op0, op1, op2)))
240 return tem;
242 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
245 /* Likewise, for relational operations.
246 CMP_MODE specifies mode comparison is done in. */
248 rtx
249 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
250 enum machine_mode cmp_mode, rtx op0, rtx op1)
252 rtx tem;
254 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
255 op0, op1)))
256 return tem;
258 return gen_rtx_fmt_ee (code, mode, op0, op1);
261 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
262 resulting RTX. Return a new RTX which is as simplified as possible. */
264 rtx
265 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
267 enum rtx_code code = GET_CODE (x);
268 enum machine_mode mode = GET_MODE (x);
269 enum machine_mode op_mode;
270 rtx op0, op1, op2;
272 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
273 to build a new expression substituting recursively. If we can't do
274 anything, return our input. */
276 if (x == old_rtx)
277 return new_rtx;
279 switch (GET_RTX_CLASS (code))
281 case RTX_UNARY:
282 op0 = XEXP (x, 0);
283 op_mode = GET_MODE (op0);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0))
286 return x;
287 return simplify_gen_unary (code, mode, op0, op_mode);
289 case RTX_BIN_ARITH:
290 case RTX_COMM_ARITH:
291 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
292 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
293 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
294 return x;
295 return simplify_gen_binary (code, mode, op0, op1);
297 case RTX_COMPARE:
298 case RTX_COMM_COMPARE:
299 op0 = XEXP (x, 0);
300 op1 = XEXP (x, 1);
301 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
302 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
303 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
304 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
305 return x;
306 return simplify_gen_relational (code, mode, op_mode, op0, op1);
308 case RTX_TERNARY:
309 case RTX_BITFIELD_OPS:
310 op0 = XEXP (x, 0);
311 op_mode = GET_MODE (op0);
312 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
313 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
314 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
315 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
316 return x;
317 if (op_mode == VOIDmode)
318 op_mode = GET_MODE (op0);
319 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
321 case RTX_EXTRA:
322 /* The only case we try to handle is a SUBREG. */
323 if (code == SUBREG)
325 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
326 if (op0 == SUBREG_REG (x))
327 return x;
328 op0 = simplify_gen_subreg (GET_MODE (x), op0,
329 GET_MODE (SUBREG_REG (x)),
330 SUBREG_BYTE (x));
331 return op0 ? op0 : x;
333 break;
335 case RTX_OBJ:
336 if (code == MEM)
338 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
339 if (op0 == XEXP (x, 0))
340 return x;
341 return replace_equiv_address_nv (x, op0);
343 else if (code == LO_SUM)
345 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
346 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
348 /* (lo_sum (high x) x) -> x */
349 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
350 return op1;
352 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
353 return x;
354 return gen_rtx_LO_SUM (mode, op0, op1);
356 else if (code == REG)
358 if (rtx_equal_p (x, old_rtx))
359 return new_rtx;
361 break;
363 default:
364 break;
366 return x;
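/* A minimal illustrative sketch (not part of the original file): substituting
   a constant for a register and letting the generators above fold the result.
   The helper name and register number are arbitrary.  */
#if 0
static void
example_replace_rtx (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx expr = gen_rtx_PLUS (SImode, reg, GEN_INT (1));
  rtx folded = simplify_replace_rtx (expr, reg, GEN_INT (7));
  /* folded is expected to be (const_int 8).  */
}
#endif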
369 /* Try to simplify a unary operation CODE whose output mode is to be
370 MODE with input operand OP whose mode was originally OP_MODE.
371 Return zero if no simplification can be made. */
372 rtx
373 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
374 rtx op, enum machine_mode op_mode)
376 rtx trueop, tem;
378 if (GET_CODE (op) == CONST)
379 op = XEXP (op, 0);
381 trueop = avoid_constant_pool_reference (op);
383 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
384 if (tem)
385 return tem;
387 return simplify_unary_operation_1 (code, mode, op);
390 /* Perform some simplifications we can do even if the operands
391 aren't constant. */
392 static rtx
393 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
395 enum rtx_code reversed;
396 rtx temp;
398 switch (code)
400 case NOT:
401 /* (not (not X)) == X. */
402 if (GET_CODE (op) == NOT)
403 return XEXP (op, 0);
405 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
406 comparison is all ones. */
407 if (COMPARISON_P (op)
408 && (mode == BImode || STORE_FLAG_VALUE == -1)
409 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
410 return simplify_gen_relational (reversed, mode, VOIDmode,
411 XEXP (op, 0), XEXP (op, 1));
413 /* (not (plus X -1)) can become (neg X). */
414 if (GET_CODE (op) == PLUS
415 && XEXP (op, 1) == constm1_rtx)
416 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
418 /* Similarly, (not (neg X)) is (plus X -1). */
419 if (GET_CODE (op) == NEG)
420 return plus_constant (XEXP (op, 0), -1);
422 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
423 if (GET_CODE (op) == XOR
424 && GET_CODE (XEXP (op, 1)) == CONST_INT
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
429 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
430 if (GET_CODE (op) == PLUS
431 && GET_CODE (XEXP (op, 1)) == CONST_INT
432 && mode_signbit_p (mode, XEXP (op, 1))
433 && (temp = simplify_unary_operation (NOT, mode,
434 XEXP (op, 1), mode)) != 0)
435 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
438 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
439 operands other than 1, but that is not valid. We could do a
440 similar simplification for (not (lshiftrt C X)) where C is
441 just the sign bit, but this doesn't seem common enough to
442 bother with. */
443 if (GET_CODE (op) == ASHIFT
444 && XEXP (op, 0) == const1_rtx)
446 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
447 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
450 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
451 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
452 so we can perform the above simplification. */
454 if (STORE_FLAG_VALUE == -1
455 && GET_CODE (op) == ASHIFTRT
456 && GET_CODE (XEXP (op, 1)) == CONST_INT
457 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
458 return simplify_gen_relational (GE, mode, VOIDmode,
459 XEXP (op, 0), const0_rtx);
462 if (GET_CODE (op) == SUBREG
463 && subreg_lowpart_p (op)
464 && (GET_MODE_SIZE (GET_MODE (op))
465 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
466 && GET_CODE (SUBREG_REG (op)) == ASHIFT
467 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
469 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
470 rtx x;
472 x = gen_rtx_ROTATE (inner_mode,
473 simplify_gen_unary (NOT, inner_mode, const1_rtx,
474 inner_mode),
475 XEXP (SUBREG_REG (op), 1));
476 return rtl_hooks.gen_lowpart_no_emit (mode, x);
479 /* Apply De Morgan's laws to reduce number of patterns for machines
480 with negating logical insns (and-not, nand, etc.). If result has
481 only one NOT, put it first, since that is how the patterns are
482 coded. */
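/* For instance, (not (ior A B)) is rewritten here as (and (not A) (not B)),
   and (not (and A B)) as (ior (not A) (not B)); the recursive
   simplify_gen_unary calls may first collapse any (not (not X)) back to X.  */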
484 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
486 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
487 enum machine_mode op_mode;
489 op_mode = GET_MODE (in1);
490 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
492 op_mode = GET_MODE (in2);
493 if (op_mode == VOIDmode)
494 op_mode = mode;
495 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
497 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
499 rtx tem = in2;
500 in2 = in1; in1 = tem;
503 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
504 mode, in1, in2);
506 break;
508 case NEG:
509 /* (neg (neg X)) == X. */
510 if (GET_CODE (op) == NEG)
511 return XEXP (op, 0);
513 /* (neg (plus X 1)) can become (not X). */
514 if (GET_CODE (op) == PLUS
515 && XEXP (op, 1) == const1_rtx)
516 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
518 /* Similarly, (neg (not X)) is (plus X 1). */
519 if (GET_CODE (op) == NOT)
520 return plus_constant (XEXP (op, 0), 1);
522 /* (neg (minus X Y)) can become (minus Y X). This transformation
523 isn't safe for modes with signed zeros, since if X and Y are
524 both +0, (minus Y X) is the same as (minus X Y). If the
525 rounding mode is towards +infinity (or -infinity) then the two
526 expressions will be rounded differently. */
527 if (GET_CODE (op) == MINUS
528 && !HONOR_SIGNED_ZEROS (mode)
529 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
530 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
532 if (GET_CODE (op) == PLUS
533 && !HONOR_SIGNED_ZEROS (mode)
534 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
536 /* (neg (plus A C)) is simplified to (minus -C A). */
537 if (GET_CODE (XEXP (op, 1)) == CONST_INT
538 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
540 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
541 if (temp)
542 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
545 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
546 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
547 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
550 /* (neg (mult A B)) becomes (mult (neg A) B).
551 This works even for floating-point values. */
552 if (GET_CODE (op) == MULT
553 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
555 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
556 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
559 /* NEG commutes with ASHIFT since it is multiplication. Only do
560 this if we can then eliminate the NEG (e.g., if the operand
561 is a constant). */
562 if (GET_CODE (op) == ASHIFT)
564 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
565 if (temp)
566 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
569 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == ASHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (LSHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
577 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
578 C is equal to the width of MODE minus 1. */
579 if (GET_CODE (op) == LSHIFTRT
580 && GET_CODE (XEXP (op, 1)) == CONST_INT
581 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
582 return simplify_gen_binary (ASHIFTRT, mode,
583 XEXP (op, 0), XEXP (op, 1));
585 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
586 if (GET_CODE (op) == XOR
587 && XEXP (op, 1) == const1_rtx
588 && nonzero_bits (XEXP (op, 0), mode) == 1)
589 return plus_constant (XEXP (op, 0), -1);
591 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
592 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
593 if (GET_CODE (op) == LT
594 && XEXP (op, 1) == const0_rtx)
596 enum machine_mode inner = GET_MODE (XEXP (op, 0));
597 int isize = GET_MODE_BITSIZE (inner);
598 if (STORE_FLAG_VALUE == 1)
600 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
601 GEN_INT (isize - 1));
602 if (mode == inner)
603 return temp;
604 if (GET_MODE_BITSIZE (mode) > isize)
605 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
606 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
608 else if (STORE_FLAG_VALUE == -1)
610 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
611 GEN_INT (isize - 1));
612 if (mode == inner)
613 return temp;
614 if (GET_MODE_BITSIZE (mode) > isize)
615 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
616 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
619 break;
621 case TRUNCATE:
622 /* We can't handle truncation to a partial integer mode here
623 because we don't know the real bitsize of the partial
624 integer mode. */
625 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
626 break;
628 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
629 if ((GET_CODE (op) == SIGN_EXTEND
630 || GET_CODE (op) == ZERO_EXTEND)
631 && GET_MODE (XEXP (op, 0)) == mode)
632 return XEXP (op, 0);
634 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
635 (OP:SI foo:SI) if OP is NEG or ABS. */
636 if ((GET_CODE (op) == ABS
637 || GET_CODE (op) == NEG)
638 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
639 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
640 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
641 return simplify_gen_unary (GET_CODE (op), mode,
642 XEXP (XEXP (op, 0), 0), mode);
644 /* (truncate:A (subreg:B (truncate:C X) 0)) is
645 (truncate:A X). */
646 if (GET_CODE (op) == SUBREG
647 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
648 && subreg_lowpart_p (op))
649 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
650 GET_MODE (XEXP (SUBREG_REG (op), 0)));
652 /* If we know that the value is already truncated, we can
653 replace the TRUNCATE with a SUBREG. Note that this is also
654 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
655 modes we just have to apply a different definition for
656 truncation. But don't do this for an (LSHIFTRT (MULT ...))
657 since this will cause problems with the umulXi3_highpart
658 patterns. */
659 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
660 GET_MODE_BITSIZE (GET_MODE (op)))
661 ? (num_sign_bit_copies (op, GET_MODE (op))
662 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
663 - GET_MODE_BITSIZE (mode)))
664 : truncated_to_mode (mode, op))
665 && ! (GET_CODE (op) == LSHIFTRT
666 && GET_CODE (XEXP (op, 0)) == MULT))
667 return rtl_hooks.gen_lowpart_no_emit (mode, op);
669 /* A truncate of a comparison can be replaced with a subreg if
670 STORE_FLAG_VALUE permits. This is like the previous test,
671 but it works even if the comparison is done in a mode larger
672 than HOST_BITS_PER_WIDE_INT. */
673 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
674 && COMPARISON_P (op)
675 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
676 return rtl_hooks.gen_lowpart_no_emit (mode, op);
677 break;
679 case FLOAT_TRUNCATE:
680 if (DECIMAL_FLOAT_MODE_P (mode))
681 break;
683 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
684 if (GET_CODE (op) == FLOAT_EXTEND
685 && GET_MODE (XEXP (op, 0)) == mode)
686 return XEXP (op, 0);
688 /* (float_truncate:SF (float_truncate:DF foo:XF))
689 = (float_truncate:SF foo:XF).
690 This may eliminate double rounding, so it is unsafe.
692 (float_truncate:SF (float_extend:XF foo:DF))
693 = (float_truncate:SF foo:DF).
695 (float_truncate:DF (float_extend:XF foo:SF))
696 = (float_extend:DF foo:SF). */
697 if ((GET_CODE (op) == FLOAT_TRUNCATE
698 && flag_unsafe_math_optimizations)
699 || GET_CODE (op) == FLOAT_EXTEND)
700 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
701 0)))
702 > GET_MODE_SIZE (mode)
703 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
704 mode,
705 XEXP (op, 0), mode);
707 /* (float_truncate (float x)) is (float x) */
708 if (GET_CODE (op) == FLOAT
709 && (flag_unsafe_math_optimizations
710 || ((unsigned)significand_size (GET_MODE (op))
711 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
712 - num_sign_bit_copies (XEXP (op, 0),
713 GET_MODE (XEXP (op, 0)))))))
714 return simplify_gen_unary (FLOAT, mode,
715 XEXP (op, 0),
716 GET_MODE (XEXP (op, 0)));
718 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
719 (OP:SF foo:SF) if OP is NEG or ABS. */
720 if ((GET_CODE (op) == ABS
721 || GET_CODE (op) == NEG)
722 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
724 return simplify_gen_unary (GET_CODE (op), mode,
725 XEXP (XEXP (op, 0), 0), mode);
727 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
728 is (float_truncate:SF x). */
729 if (GET_CODE (op) == SUBREG
730 && subreg_lowpart_p (op)
731 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
732 return SUBREG_REG (op);
733 break;
735 case FLOAT_EXTEND:
736 if (DECIMAL_FLOAT_MODE_P (mode))
737 break;
739 /* (float_extend (float_extend x)) is (float_extend x)
741 (float_extend (float x)) is (float x) assuming that double
742 rounding can't happen. */
744 if (GET_CODE (op) == FLOAT_EXTEND
745 || (GET_CODE (op) == FLOAT
746 && ((unsigned)significand_size (GET_MODE (op))
747 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
748 - num_sign_bit_copies (XEXP (op, 0),
749 GET_MODE (XEXP (op, 0)))))))
750 return simplify_gen_unary (GET_CODE (op), mode,
751 XEXP (op, 0),
752 GET_MODE (XEXP (op, 0)));
754 break;
756 case ABS:
757 /* (abs (neg <foo>)) -> (abs <foo>) */
758 if (GET_CODE (op) == NEG)
759 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
760 GET_MODE (XEXP (op, 0)));
762 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
763 do nothing. */
764 if (GET_MODE (op) == VOIDmode)
765 break;
767 /* If operand is something known to be positive, ignore the ABS. */
768 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
769 || ((GET_MODE_BITSIZE (GET_MODE (op))
770 <= HOST_BITS_PER_WIDE_INT)
771 && ((nonzero_bits (op, GET_MODE (op))
772 & ((HOST_WIDE_INT) 1
773 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
774 == 0)))
775 return op;
777 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
778 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
779 return gen_rtx_NEG (mode, op);
781 break;
783 case FFS:
784 /* (ffs (*_extend <X>)) = (ffs <X>) */
785 if (GET_CODE (op) == SIGN_EXTEND
786 || GET_CODE (op) == ZERO_EXTEND)
787 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
788 GET_MODE (XEXP (op, 0)));
789 break;
791 case POPCOUNT:
792 case PARITY:
793 /* (pop* (zero_extend <X>)) = (pop* <X>) */
794 if (GET_CODE (op) == ZERO_EXTEND)
795 return simplify_gen_unary (code, mode, XEXP (op, 0),
796 GET_MODE (XEXP (op, 0)));
797 break;
799 case FLOAT:
800 /* (float (sign_extend <X>)) = (float <X>). */
801 if (GET_CODE (op) == SIGN_EXTEND)
802 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
803 GET_MODE (XEXP (op, 0)));
804 break;
806 case SIGN_EXTEND:
807 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
808 becomes just the MINUS if its mode is MODE. This allows
809 folding switch statements on machines using casesi (such as
810 the VAX). */
811 if (GET_CODE (op) == TRUNCATE
812 && GET_MODE (XEXP (op, 0)) == mode
813 && GET_CODE (XEXP (op, 0)) == MINUS
814 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
815 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
816 return XEXP (op, 0);
818 /* Check for a sign extension of a subreg of a promoted
819 variable, where the promotion is sign-extended, and the
820 target mode is the same as the variable's promotion. */
821 if (GET_CODE (op) == SUBREG
822 && SUBREG_PROMOTED_VAR_P (op)
823 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
824 && GET_MODE (XEXP (op, 0)) == mode)
825 return XEXP (op, 0);
827 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
828 if (! POINTERS_EXTEND_UNSIGNED
829 && mode == Pmode && GET_MODE (op) == ptr_mode
830 && (CONSTANT_P (op)
831 || (GET_CODE (op) == SUBREG
832 && REG_P (SUBREG_REG (op))
833 && REG_POINTER (SUBREG_REG (op))
834 && GET_MODE (SUBREG_REG (op)) == Pmode)))
835 return convert_memory_address (Pmode, op);
836 #endif
837 break;
839 case ZERO_EXTEND:
840 /* Check for a zero extension of a subreg of a promoted
841 variable, where the promotion is zero-extended, and the
842 target mode is the same as the variable's promotion. */
843 if (GET_CODE (op) == SUBREG
844 && SUBREG_PROMOTED_VAR_P (op)
845 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
846 && GET_MODE (XEXP (op, 0)) == mode)
847 return XEXP (op, 0);
849 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
850 if (POINTERS_EXTEND_UNSIGNED > 0
851 && mode == Pmode && GET_MODE (op) == ptr_mode
852 && (CONSTANT_P (op)
853 || (GET_CODE (op) == SUBREG
854 && REG_P (SUBREG_REG (op))
855 && REG_POINTER (SUBREG_REG (op))
856 && GET_MODE (SUBREG_REG (op)) == Pmode)))
857 return convert_memory_address (Pmode, op);
858 #endif
859 break;
861 default:
862 break;
865 return 0;
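/* A minimal illustrative sketch (not part of the original file): two of the
   non-constant simplifications above, reached through simplify_gen_unary.
   The helper name and register number are arbitrary.  */
#if 0
static void
example_unary_noconst (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  /* (not (not X)) collapses back to X.  */
  rtx notnot = simplify_gen_unary (NOT, SImode,
				   gen_rtx_NOT (SImode, reg), SImode);
  /* (neg (neg X)) likewise returns X.  */
  rtx negneg = simplify_gen_unary (NEG, SImode,
				   gen_rtx_NEG (SImode, reg), SImode);
}
#endif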
868 /* Try to compute the value of a unary operation CODE whose output mode is to
869 be MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if the value cannot be computed. */
871 rtx
872 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
873 rtx op, enum machine_mode op_mode)
875 unsigned int width = GET_MODE_BITSIZE (mode);
877 if (code == VEC_DUPLICATE)
879 gcc_assert (VECTOR_MODE_P (mode));
880 if (GET_MODE (op) != VOIDmode)
882 if (!VECTOR_MODE_P (GET_MODE (op)))
883 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
884 else
885 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
886 (GET_MODE (op)));
888 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
889 || GET_CODE (op) == CONST_VECTOR)
891 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
892 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
893 rtvec v = rtvec_alloc (n_elts);
894 unsigned int i;
896 if (GET_CODE (op) != CONST_VECTOR)
897 for (i = 0; i < n_elts; i++)
898 RTVEC_ELT (v, i) = op;
899 else
901 enum machine_mode inmode = GET_MODE (op);
902 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
903 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
905 gcc_assert (in_n_elts < n_elts);
906 gcc_assert ((n_elts % in_n_elts) == 0);
907 for (i = 0; i < n_elts; i++)
908 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
910 return gen_rtx_CONST_VECTOR (mode, v);
914 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
916 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
917 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
918 enum machine_mode opmode = GET_MODE (op);
919 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
920 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
921 rtvec v = rtvec_alloc (n_elts);
922 unsigned int i;
924 gcc_assert (op_n_elts == n_elts);
925 for (i = 0; i < n_elts; i++)
927 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
928 CONST_VECTOR_ELT (op, i),
929 GET_MODE_INNER (opmode));
930 if (!x)
931 return 0;
932 RTVEC_ELT (v, i) = x;
934 return gen_rtx_CONST_VECTOR (mode, v);
937 /* The order of these tests is critical so that, for example, we don't
938 check the wrong mode (input vs. output) for a conversion operation,
939 such as FIX. At some point, this should be simplified. */
941 if (code == FLOAT && GET_MODE (op) == VOIDmode
942 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
944 HOST_WIDE_INT hv, lv;
945 REAL_VALUE_TYPE d;
947 if (GET_CODE (op) == CONST_INT)
948 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
949 else
950 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
952 REAL_VALUE_FROM_INT (d, lv, hv, mode);
953 d = real_value_truncate (mode, d);
954 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
956 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
957 && (GET_CODE (op) == CONST_DOUBLE
958 || GET_CODE (op) == CONST_INT))
960 HOST_WIDE_INT hv, lv;
961 REAL_VALUE_TYPE d;
963 if (GET_CODE (op) == CONST_INT)
964 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
965 else
966 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
968 if (op_mode == VOIDmode)
970 /* We don't know how to interpret negative-looking numbers in
971 this case, so don't try to fold those. */
972 if (hv < 0)
973 return 0;
975 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
976 ;
977 else
978 hv = 0, lv &= GET_MODE_MASK (op_mode);
980 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
981 d = real_value_truncate (mode, d);
982 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
985 if (GET_CODE (op) == CONST_INT
986 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
988 HOST_WIDE_INT arg0 = INTVAL (op);
989 HOST_WIDE_INT val;
991 switch (code)
993 case NOT:
994 val = ~ arg0;
995 break;
997 case NEG:
998 val = - arg0;
999 break;
1001 case ABS:
1002 val = (arg0 >= 0 ? arg0 : - arg0);
1003 break;
1005 case FFS:
1006 /* Don't use ffs here. Instead, get low order bit and then its
1007 number. If arg0 is zero, this will return 0, as desired. */
1008 arg0 &= GET_MODE_MASK (mode);
1009 val = exact_log2 (arg0 & (- arg0)) + 1;
1010 break;
1012 case CLZ:
1013 arg0 &= GET_MODE_MASK (mode);
1014 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1015 ;
1016 else
1017 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1018 break;
1020 case CTZ:
1021 arg0 &= GET_MODE_MASK (mode);
1022 if (arg0 == 0)
1024 /* Even if the value at zero is undefined, we have to come
1025 up with some replacement. Seems good enough. */
1026 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1027 val = GET_MODE_BITSIZE (mode);
1029 else
1030 val = exact_log2 (arg0 & -arg0);
1031 break;
1033 case POPCOUNT:
1034 arg0 &= GET_MODE_MASK (mode);
1035 val = 0;
1036 while (arg0)
1037 val++, arg0 &= arg0 - 1;
1038 break;
1040 case PARITY:
1041 arg0 &= GET_MODE_MASK (mode);
1042 val = 0;
1043 while (arg0)
1044 val++, arg0 &= arg0 - 1;
1045 val &= 1;
1046 break;
1048 case BSWAP:
1049 return 0;
1051 case TRUNCATE:
1052 val = arg0;
1053 break;
1055 case ZERO_EXTEND:
1056 /* When zero-extending a CONST_INT, we need to know its
1057 original mode. */
1058 gcc_assert (op_mode != VOIDmode);
1059 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1061 /* If we were really extending the mode,
1062 we would have to distinguish between zero-extension
1063 and sign-extension. */
1064 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1065 val = arg0;
1067 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1068 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1069 else
1070 return 0;
1071 break;
1073 case SIGN_EXTEND:
1074 if (op_mode == VOIDmode)
1075 op_mode = mode;
1076 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1078 /* If we were really extending the mode,
1079 we would have to distinguish between zero-extension
1080 and sign-extension. */
1081 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1082 val = arg0;
1084 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1086 val
1087 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1088 if (val
1089 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1090 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1092 else
1093 return 0;
1094 break;
1096 case SQRT:
1097 case FLOAT_EXTEND:
1098 case FLOAT_TRUNCATE:
1099 case SS_TRUNCATE:
1100 case US_TRUNCATE:
1101 case SS_NEG:
1102 return 0;
1104 default:
1105 gcc_unreachable ();
1108 return gen_int_mode (val, mode);
1111 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1112 for a DImode operation on a CONST_INT. */
1113 else if (GET_MODE (op) == VOIDmode
1114 && width <= HOST_BITS_PER_WIDE_INT * 2
1115 && (GET_CODE (op) == CONST_DOUBLE
1116 || GET_CODE (op) == CONST_INT))
1118 unsigned HOST_WIDE_INT l1, lv;
1119 HOST_WIDE_INT h1, hv;
1121 if (GET_CODE (op) == CONST_DOUBLE)
1122 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1123 else
1124 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1126 switch (code)
1128 case NOT:
1129 lv = ~ l1;
1130 hv = ~ h1;
1131 break;
1133 case NEG:
1134 neg_double (l1, h1, &lv, &hv);
1135 break;
1137 case ABS:
1138 if (h1 < 0)
1139 neg_double (l1, h1, &lv, &hv);
1140 else
1141 lv = l1, hv = h1;
1142 break;
1144 case FFS:
1145 hv = 0;
1146 if (l1 == 0)
1148 if (h1 == 0)
1149 lv = 0;
1150 else
1151 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1153 else
1154 lv = exact_log2 (l1 & -l1) + 1;
1155 break;
1157 case CLZ:
1158 hv = 0;
1159 if (h1 != 0)
1160 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1161 - HOST_BITS_PER_WIDE_INT;
1162 else if (l1 != 0)
1163 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1164 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1165 lv = GET_MODE_BITSIZE (mode);
1166 break;
1168 case CTZ:
1169 hv = 0;
1170 if (l1 != 0)
1171 lv = exact_log2 (l1 & -l1);
1172 else if (h1 != 0)
1173 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1174 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1175 lv = GET_MODE_BITSIZE (mode);
1176 break;
1178 case POPCOUNT:
1179 hv = 0;
1180 lv = 0;
1181 while (l1)
1182 lv++, l1 &= l1 - 1;
1183 while (h1)
1184 lv++, h1 &= h1 - 1;
1185 break;
1187 case PARITY:
1188 hv = 0;
1189 lv = 0;
1190 while (l1)
1191 lv++, l1 &= l1 - 1;
1192 while (h1)
1193 lv++, h1 &= h1 - 1;
1194 lv &= 1;
1195 break;
1197 case TRUNCATE:
1198 /* This is just a change-of-mode, so do nothing. */
1199 lv = l1, hv = h1;
1200 break;
1202 case ZERO_EXTEND:
1203 gcc_assert (op_mode != VOIDmode);
1205 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1206 return 0;
1208 hv = 0;
1209 lv = l1 & GET_MODE_MASK (op_mode);
1210 break;
1212 case SIGN_EXTEND:
1213 if (op_mode == VOIDmode
1214 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1215 return 0;
1216 else
1218 lv = l1 & GET_MODE_MASK (op_mode);
1219 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1220 && (lv & ((HOST_WIDE_INT) 1
1221 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1222 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1224 hv = HWI_SIGN_EXTEND (lv);
1226 break;
1228 case SQRT:
1229 return 0;
1231 default:
1232 return 0;
1235 return immed_double_const (lv, hv, mode);
1238 else if (GET_CODE (op) == CONST_DOUBLE
1239 && SCALAR_FLOAT_MODE_P (mode))
1241 REAL_VALUE_TYPE d, t;
1242 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1244 switch (code)
1246 case SQRT:
1247 if (HONOR_SNANS (mode) && real_isnan (&d))
1248 return 0;
1249 real_sqrt (&t, mode, &d);
1250 d = t;
1251 break;
1252 case ABS:
1253 d = REAL_VALUE_ABS (d);
1254 break;
1255 case NEG:
1256 d = REAL_VALUE_NEGATE (d);
1257 break;
1258 case FLOAT_TRUNCATE:
1259 d = real_value_truncate (mode, d);
1260 break;
1261 case FLOAT_EXTEND:
1262 /* All this does is change the mode. */
1263 break;
1264 case FIX:
1265 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1266 break;
1267 case NOT:
1269 long tmp[4];
1270 int i;
1272 real_to_target (tmp, &d, GET_MODE (op));
1273 for (i = 0; i < 4; i++)
1274 tmp[i] = ~tmp[i];
1275 real_from_target (&d, tmp, mode);
1276 break;
1278 default:
1279 gcc_unreachable ();
1281 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1284 else if (GET_CODE (op) == CONST_DOUBLE
1285 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1286 && GET_MODE_CLASS (mode) == MODE_INT
1287 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1289 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1290 operators are intentionally left unspecified (to ease implementation
1291 by target backends), for consistency, this routine implements the
1292 same semantics for constant folding as used by the middle-end. */
1294 /* This was formerly used only for non-IEEE float.
1295 eggert@twinsun.com says it is safe for IEEE also. */
1296 HOST_WIDE_INT xh, xl, th, tl;
1297 REAL_VALUE_TYPE x, t;
1298 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1299 switch (code)
1301 case FIX:
1302 if (REAL_VALUE_ISNAN (x))
1303 return const0_rtx;
1305 /* Test against the signed upper bound. */
1306 if (width > HOST_BITS_PER_WIDE_INT)
1308 th = ((unsigned HOST_WIDE_INT) 1
1309 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1310 tl = -1;
1312 else
1314 th = 0;
1315 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1317 real_from_integer (&t, VOIDmode, tl, th, 0);
1318 if (REAL_VALUES_LESS (t, x))
1320 xh = th;
1321 xl = tl;
1322 break;
1325 /* Test against the signed lower bound. */
1326 if (width > HOST_BITS_PER_WIDE_INT)
1328 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1329 tl = 0;
1331 else
1333 th = -1;
1334 tl = (HOST_WIDE_INT) -1 << (width - 1);
1336 real_from_integer (&t, VOIDmode, tl, th, 0);
1337 if (REAL_VALUES_LESS (x, t))
1339 xh = th;
1340 xl = tl;
1341 break;
1343 REAL_VALUE_TO_INT (&xl, &xh, x);
1344 break;
1346 case UNSIGNED_FIX:
1347 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1348 return const0_rtx;
1350 /* Test against the unsigned upper bound. */
1351 if (width == 2*HOST_BITS_PER_WIDE_INT)
1353 th = -1;
1354 tl = -1;
1356 else if (width >= HOST_BITS_PER_WIDE_INT)
1358 th = ((unsigned HOST_WIDE_INT) 1
1359 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1360 tl = -1;
1362 else
1364 th = 0;
1365 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1367 real_from_integer (&t, VOIDmode, tl, th, 1);
1368 if (REAL_VALUES_LESS (t, x))
1370 xh = th;
1371 xl = tl;
1372 break;
1375 REAL_VALUE_TO_INT (&xl, &xh, x);
1376 break;
1378 default:
1379 gcc_unreachable ();
1381 return immed_double_const (xl, xh, mode);
1384 return NULL_RTX;
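/* A minimal illustrative sketch (not part of the original file) of constant
   folding through the routine above.  The helper name is hypothetical.  */
#if 0
static void
example_const_unary (void)
{
  /* (neg (const_int 5)) in SImode folds to (const_int -5).  */
  rtx neg = simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode);
  /* Zero-extending QImode -1 into SImode folds to (const_int 255).  */
  rtx ext = simplify_const_unary_operation (ZERO_EXTEND, SImode,
					    gen_int_mode (-1, QImode), QImode);
}
#endif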
1387 /* Subroutine of simplify_binary_operation to simplify a commutative,
1388 associative binary operation CODE with result mode MODE, operating
1389 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1390 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1391 canonicalization is possible. */
1393 static rtx
1394 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1395 rtx op0, rtx op1)
1397 rtx tem;
1399 /* Linearize the operator to the left. */
1400 if (GET_CODE (op1) == code)
1402 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1403 if (GET_CODE (op0) == code)
1405 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1406 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1409 /* "a op (b op c)" becomes "(b op c) op a". */
1410 if (! swap_commutative_operands_p (op1, op0))
1411 return simplify_gen_binary (code, mode, op1, op0);
1413 tem = op0;
1414 op0 = op1;
1415 op1 = tem;
1418 if (GET_CODE (op0) == code)
1420 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1421 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1423 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1424 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1427 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1428 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1429 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1430 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1431 if (tem != 0)
1432 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1434 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1435 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1436 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1437 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1438 if (tem != 0)
1439 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1442 return 0;
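/* A minimal illustrative sketch (not part of the original file): the effect
   of the reassociation above when reached through simplify_gen_binary.
   The helper name and register number are arbitrary.  */
#if 0
static void
example_reassociate (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx inner = gen_rtx_IOR (SImode, reg, GEN_INT (0x0f));
  rtx outer = simplify_gen_binary (IOR, SImode, inner, GEN_INT (0xf0));
  /* outer is expected to be (ior (reg:SI 100) (const_int 255)).  */
}
#endif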
1446 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1447 and OP1. Return 0 if no simplification is possible.
1449 Don't use this for relational operations such as EQ or LT.
1450 Use simplify_relational_operation instead. */
1451 rtx
1452 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1453 rtx op0, rtx op1)
1455 rtx trueop0, trueop1;
1456 rtx tem;
1458 /* Relational operations don't work here. We must know the mode
1459 of the operands in order to do the comparison correctly.
1460 Assuming a full word can give incorrect results.
1461 Consider comparing 128 with -128 in QImode. */
1462 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1463 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1465 /* Make sure the constant is second. */
1466 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1467 && swap_commutative_operands_p (op0, op1))
1469 tem = op0, op0 = op1, op1 = tem;
1472 trueop0 = avoid_constant_pool_reference (op0);
1473 trueop1 = avoid_constant_pool_reference (op1);
1475 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1476 if (tem)
1477 return tem;
1478 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1481 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1482 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1483 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1484 actual constants. */
1486 static rtx
1487 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1488 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1490 rtx tem, reversed, opleft, opright;
1491 HOST_WIDE_INT val;
1492 unsigned int width = GET_MODE_BITSIZE (mode);
1494 /* Even if we can't compute a constant result,
1495 there are some cases worth simplifying. */
1497 switch (code)
1499 case PLUS:
1500 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1501 when x is NaN, infinite, or finite and nonzero. They aren't
1502 when x is -0 and the rounding mode is not towards -infinity,
1503 since (-0) + 0 is then 0. */
1504 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1505 return op0;
1507 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1508 transformations are safe even for IEEE. */
1509 if (GET_CODE (op0) == NEG)
1510 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1511 else if (GET_CODE (op1) == NEG)
1512 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1514 /* (~a) + 1 -> -a */
1515 if (INTEGRAL_MODE_P (mode)
1516 && GET_CODE (op0) == NOT
1517 && trueop1 == const1_rtx)
1518 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1520 /* Handle both-operands-constant cases. We can only add
1521 CONST_INTs to constants since the sum of relocatable symbols
1522 can't be handled by most assemblers. Don't add CONST_INT
1523 to CONST_INT since overflow won't be computed properly if wider
1524 than HOST_BITS_PER_WIDE_INT. */
1526 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1527 && GET_CODE (op1) == CONST_INT)
1528 return plus_constant (op0, INTVAL (op1));
1529 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1530 && GET_CODE (op0) == CONST_INT)
1531 return plus_constant (op1, INTVAL (op0));
1533 /* See if this is something like X * C - X or vice versa or
1534 if the multiplication is written as a shift. If so, we can
1535 distribute and make a new multiply, shift, or maybe just
1536 have X (if C is 2 in the example above). But don't make
1537 something more expensive than we had before. */
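/* For instance, (plus (mult X 3) X) can become (mult X 4) here, and
   (plus (ashift X 2) X) can become (mult X 5), provided the new form is
   no more expensive by rtx_cost. */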
1539 if (SCALAR_INT_MODE_P (mode))
1541 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1542 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1543 rtx lhs = op0, rhs = op1;
1545 if (GET_CODE (lhs) == NEG)
1547 coeff0l = -1;
1548 coeff0h = -1;
1549 lhs = XEXP (lhs, 0);
1551 else if (GET_CODE (lhs) == MULT
1552 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1554 coeff0l = INTVAL (XEXP (lhs, 1));
1555 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1556 lhs = XEXP (lhs, 0);
1558 else if (GET_CODE (lhs) == ASHIFT
1559 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1560 && INTVAL (XEXP (lhs, 1)) >= 0
1561 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1563 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1564 coeff0h = 0;
1565 lhs = XEXP (lhs, 0);
1568 if (GET_CODE (rhs) == NEG)
1570 coeff1l = -1;
1571 coeff1h = -1;
1572 rhs = XEXP (rhs, 0);
1574 else if (GET_CODE (rhs) == MULT
1575 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1577 coeff1l = INTVAL (XEXP (rhs, 1));
1578 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1579 rhs = XEXP (rhs, 0);
1581 else if (GET_CODE (rhs) == ASHIFT
1582 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1583 && INTVAL (XEXP (rhs, 1)) >= 0
1584 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1586 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1587 coeff1h = 0;
1588 rhs = XEXP (rhs, 0);
1591 if (rtx_equal_p (lhs, rhs))
1593 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1594 rtx coeff;
1595 unsigned HOST_WIDE_INT l;
1596 HOST_WIDE_INT h;
1598 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1599 coeff = immed_double_const (l, h, mode);
1601 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1602 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1603 ? tem : 0;
1607 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1608 if ((GET_CODE (op1) == CONST_INT
1609 || GET_CODE (op1) == CONST_DOUBLE)
1610 && GET_CODE (op0) == XOR
1611 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1612 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1613 && mode_signbit_p (mode, op1))
1614 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1615 simplify_gen_binary (XOR, mode, op1,
1616 XEXP (op0, 1)));
1618 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1619 if (GET_CODE (op0) == MULT
1620 && GET_CODE (XEXP (op0, 0)) == NEG)
1622 rtx in1, in2;
1624 in1 = XEXP (XEXP (op0, 0), 0);
1625 in2 = XEXP (op0, 1);
1626 return simplify_gen_binary (MINUS, mode, op1,
1627 simplify_gen_binary (MULT, mode,
1628 in1, in2));
1631 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1632 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1633 is 1. */
1634 if (COMPARISON_P (op0)
1635 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1636 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1637 && (reversed = reversed_comparison (op0, mode)))
1638 return
1639 simplify_gen_unary (NEG, mode, reversed, mode);
1641 /* If one of the operands is a PLUS or a MINUS, see if we can
1642 simplify this by the associative law.
1643 Don't use the associative law for floating point.
1644 The inaccuracy makes it nonassociative,
1645 and subtle programs can break if operations are associated. */
1647 if (INTEGRAL_MODE_P (mode)
1648 && (plus_minus_operand_p (op0)
1649 || plus_minus_operand_p (op1))
1650 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1651 return tem;
1653 /* Reassociate floating point addition only when the user
1654 specifies unsafe math optimizations. */
1655 if (FLOAT_MODE_P (mode)
1656 && flag_unsafe_math_optimizations)
1658 tem = simplify_associative_operation (code, mode, op0, op1);
1659 if (tem)
1660 return tem;
1662 break;
1664 case COMPARE:
1665 #ifdef HAVE_cc0
1666 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1667 using cc0, in which case we want to leave it as a COMPARE
1668 so we can distinguish it from a register-register-copy.
1670 In IEEE floating point, x-0 is not the same as x. */
1672 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1673 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1674 && trueop1 == CONST0_RTX (mode))
1675 return op0;
1676 #endif
1678 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1679 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1680 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1681 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1683 rtx xop00 = XEXP (op0, 0);
1684 rtx xop10 = XEXP (op1, 0);
1686 #ifdef HAVE_cc0
1687 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1688 #else
1689 if (REG_P (xop00) && REG_P (xop10)
1690 && GET_MODE (xop00) == GET_MODE (xop10)
1691 && REGNO (xop00) == REGNO (xop10)
1692 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1693 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1694 #endif
1695 return xop00;
1697 break;
1699 case MINUS:
1700 /* We can't assume x-x is 0 even with non-IEEE floating point,
1701 but since it is zero except in very strange circumstances, we
1702 will treat it as zero with -funsafe-math-optimizations. */
1703 if (rtx_equal_p (trueop0, trueop1)
1704 && ! side_effects_p (op0)
1705 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1706 return CONST0_RTX (mode);
1708 /* Change subtraction from zero into negation. (0 - x) is the
1709 same as -x when x is NaN, infinite, or finite and nonzero.
1710 But if the mode has signed zeros, and does not round towards
1711 -infinity, then 0 - 0 is 0, not -0. */
1712 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1713 return simplify_gen_unary (NEG, mode, op1, mode);
1715 /* (-1 - a) is ~a. */
1716 if (trueop0 == constm1_rtx)
1717 return simplify_gen_unary (NOT, mode, op1, mode);
1719 /* Subtracting 0 has no effect unless the mode has signed zeros
1720 and supports rounding towards -infinity. In such a case,
1721 0 - 0 is -0. */
1722 if (!(HONOR_SIGNED_ZEROS (mode)
1723 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1724 && trueop1 == CONST0_RTX (mode))
1725 return op0;
1727 /* See if this is something like X * C - X or vice versa or
1728 if the multiplication is written as a shift. If so, we can
1729 distribute and make a new multiply, shift, or maybe just
1730 have X (if C is 2 in the example above). But don't make
1731 something more expensive than we had before. */
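/* For instance, (minus (mult X 3) X) can become (mult X 2) here, again
   only when that is no more expensive than the original. */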
1733 if (SCALAR_INT_MODE_P (mode))
1735 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1736 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1737 rtx lhs = op0, rhs = op1;
1739 if (GET_CODE (lhs) == NEG)
1741 coeff0l = -1;
1742 coeff0h = -1;
1743 lhs = XEXP (lhs, 0);
1745 else if (GET_CODE (lhs) == MULT
1746 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1748 coeff0l = INTVAL (XEXP (lhs, 1));
1749 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1750 lhs = XEXP (lhs, 0);
1752 else if (GET_CODE (lhs) == ASHIFT
1753 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1754 && INTVAL (XEXP (lhs, 1)) >= 0
1755 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1757 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1758 coeff0h = 0;
1759 lhs = XEXP (lhs, 0);
1762 if (GET_CODE (rhs) == NEG)
1764 negcoeff1l = 1;
1765 negcoeff1h = 0;
1766 rhs = XEXP (rhs, 0);
1768 else if (GET_CODE (rhs) == MULT
1769 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1771 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1772 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1773 rhs = XEXP (rhs, 0);
1775 else if (GET_CODE (rhs) == ASHIFT
1776 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1777 && INTVAL (XEXP (rhs, 1)) >= 0
1778 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1780 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1781 negcoeff1h = -1;
1782 rhs = XEXP (rhs, 0);
1785 if (rtx_equal_p (lhs, rhs))
1787 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1788 rtx coeff;
1789 unsigned HOST_WIDE_INT l;
1790 HOST_WIDE_INT h;
1792 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1793 coeff = immed_double_const (l, h, mode);
1795 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1796 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1797 ? tem : 0;
1801 /* (a - (-b)) -> (a + b). True even for IEEE. */
1802 if (GET_CODE (op1) == NEG)
1803 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1805 /* (-x - c) may be simplified as (-c - x). */
1806 if (GET_CODE (op0) == NEG
1807 && (GET_CODE (op1) == CONST_INT
1808 || GET_CODE (op1) == CONST_DOUBLE))
1810 tem = simplify_unary_operation (NEG, mode, op1, mode);
1811 if (tem)
1812 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1815 /* Don't let a relocatable value get a negative coeff. */
1816 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1817 return simplify_gen_binary (PLUS, mode,
1818 op0,
1819 neg_const_int (mode, op1));
1821 /* (x - (x & y)) -> (x & ~y) */
1822 if (GET_CODE (op1) == AND)
1824 if (rtx_equal_p (op0, XEXP (op1, 0)))
1826 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1827 GET_MODE (XEXP (op1, 1)));
1828 return simplify_gen_binary (AND, mode, op0, tem);
1830 if (rtx_equal_p (op0, XEXP (op1, 1)))
1832 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1833 GET_MODE (XEXP (op1, 0)));
1834 return simplify_gen_binary (AND, mode, op0, tem);
1838 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1839 by reversing the comparison code if valid. */
1840 if (STORE_FLAG_VALUE == 1
1841 && trueop0 == const1_rtx
1842 && COMPARISON_P (op1)
1843 && (reversed = reversed_comparison (op1, mode)))
1844 return reversed;
1846 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1847 if (GET_CODE (op1) == MULT
1848 && GET_CODE (XEXP (op1, 0)) == NEG)
1850 rtx in1, in2;
1852 in1 = XEXP (XEXP (op1, 0), 0);
1853 in2 = XEXP (op1, 1);
1854 return simplify_gen_binary (PLUS, mode,
1855 simplify_gen_binary (MULT, mode,
1856 in1, in2),
1857 op0);
1860 /* Canonicalize (minus (neg A) (mult B C)) to
1861 (minus (mult (neg B) C) A). */
1862 if (GET_CODE (op1) == MULT
1863 && GET_CODE (op0) == NEG)
1865 rtx in1, in2;
1867 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1868 in2 = XEXP (op1, 1);
1869 return simplify_gen_binary (MINUS, mode,
1870 simplify_gen_binary (MULT, mode,
1871 in1, in2),
1872 XEXP (op0, 0));
1875 /* If one of the operands is a PLUS or a MINUS, see if we can
1876 simplify this by the associative law. This will, for example,
1877 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1878 Don't use the associative law for floating point.
1879 The inaccuracy makes it nonassociative,
1880 and subtle programs can break if operations are associated. */
1882 if (INTEGRAL_MODE_P (mode)
1883 && (plus_minus_operand_p (op0)
1884 || plus_minus_operand_p (op1))
1885 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1886 return tem;
1887 break;
1889 case MULT:
1890 if (trueop1 == constm1_rtx)
1891 return simplify_gen_unary (NEG, mode, op0, mode);
1893 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1894 x is NaN, since x * 0 is then also NaN. Nor is it valid
1895 when the mode has signed zeros, since multiplying a negative
1896 number by 0 will give -0, not 0. */
1897 if (!HONOR_NANS (mode)
1898 && !HONOR_SIGNED_ZEROS (mode)
1899 && trueop1 == CONST0_RTX (mode)
1900 && ! side_effects_p (op0))
1901 return op1;
1903 /* In IEEE floating point, x*1 is not equivalent to x for
1904 signalling NaNs. */
1905 if (!HONOR_SNANS (mode)
1906 && trueop1 == CONST1_RTX (mode))
1907 return op0;
1909 /* Convert multiply by constant power of two into shift unless
1910 we are still generating RTL. This test is a kludge. */
1911 if (GET_CODE (trueop1) == CONST_INT
1912 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1913 /* If the mode is larger than the host word size, and the
1914 uppermost bit is set, then this isn't a power of two due
1915 to implicit sign extension. */
1916 && (width <= HOST_BITS_PER_WIDE_INT
1917 || val != HOST_BITS_PER_WIDE_INT - 1))
1918 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1920 /* Likewise for multipliers wider than a word. */
1921 if (GET_CODE (trueop1) == CONST_DOUBLE
1922 && (GET_MODE (trueop1) == VOIDmode
1923 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1924 && GET_MODE (op0) == mode
1925 && CONST_DOUBLE_LOW (trueop1) == 0
1926 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1927 return simplify_gen_binary (ASHIFT, mode, op0,
1928 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1930 /* x*2 is x+x and x*(-1) is -x */
1931 if (GET_CODE (trueop1) == CONST_DOUBLE
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1933 && GET_MODE (op0) == mode)
1935 REAL_VALUE_TYPE d;
1936 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1938 if (REAL_VALUES_EQUAL (d, dconst2))
1939 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1941 if (!HONOR_SNANS (mode)
1942 && REAL_VALUES_EQUAL (d, dconstm1))
1943 return simplify_gen_unary (NEG, mode, op0, mode);
1946 /* Optimize -x * -x as x * x. */
1947 if (FLOAT_MODE_P (mode)
1948 && GET_CODE (op0) == NEG
1949 && GET_CODE (op1) == NEG
1950 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1951 && !side_effects_p (XEXP (op0, 0)))
1952 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1954 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1955 if (SCALAR_FLOAT_MODE_P (mode)
1956 && GET_CODE (op0) == ABS
1957 && GET_CODE (op1) == ABS
1958 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1959 && !side_effects_p (XEXP (op0, 0)))
1960 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1962 /* Reassociate multiplication, but for floating point MULTs
1963 only when the user specifies unsafe math optimizations. */
1964 if (! FLOAT_MODE_P (mode)
1965 || flag_unsafe_math_optimizations)
1967 tem = simplify_associative_operation (code, mode, op0, op1);
1968 if (tem)
1969 return tem;
1971 break;
1973 case IOR:
1974 if (trueop1 == const0_rtx)
1975 return op0;
1976 if (GET_CODE (trueop1) == CONST_INT
1977 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1978 == GET_MODE_MASK (mode)))
1979 return op1;
1980 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1981 return op0;
1982 /* A | (~A) -> -1 */
1983 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1984 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1985 && ! side_effects_p (op0)
1986 && SCALAR_INT_MODE_P (mode))
1987 return constm1_rtx;
1989 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1990 if (GET_CODE (op1) == CONST_INT
1991 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1992 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1993 return op1;
1995 /* Convert (A & B) | A to A. */
1996 if (GET_CODE (op0) == AND
1997 && (rtx_equal_p (XEXP (op0, 0), op1)
1998 || rtx_equal_p (XEXP (op0, 1), op1))
1999 && ! side_effects_p (XEXP (op0, 0))
2000 && ! side_effects_p (XEXP (op0, 1)))
2001 return op1;
2003 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2004 mode size to (rotate A CX). */
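/* For example, assuming a 32-bit mode, (ior (ashift A (const_int 3))
(lshiftrt A (const_int 29))) becomes (rotate A (const_int 3)).  */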
2006 if (GET_CODE (op1) == ASHIFT
2007 || GET_CODE (op1) == SUBREG)
2009 opleft = op1;
2010 opright = op0;
2012 else
2014 opright = op1;
2015 opleft = op0;
2018 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2019 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2020 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2021 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2022 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2023 == GET_MODE_BITSIZE (mode)))
2024 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2026 /* Same, but for ashift that has been "simplified" to a wider mode
2027 by simplify_shift_const. */
2029 if (GET_CODE (opleft) == SUBREG
2030 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2031 && GET_CODE (opright) == LSHIFTRT
2032 && GET_CODE (XEXP (opright, 0)) == SUBREG
2033 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2034 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2035 && (GET_MODE_SIZE (GET_MODE (opleft))
2036 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2037 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2038 SUBREG_REG (XEXP (opright, 0)))
2039 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2040 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2041 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2042 == GET_MODE_BITSIZE (mode)))
2043 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2044 XEXP (SUBREG_REG (opleft), 1));
2046 /* If we have (ior (and X C1) C2), simplify this by making
2047 C1 as small as possible if C1 actually changes. */
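/* For example, (ior (and X (const_int 255)) (const_int 15)) becomes
(ior (and X (const_int 240)) (const_int 15)).  */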
2048 if (GET_CODE (op1) == CONST_INT
2049 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2050 || INTVAL (op1) > 0)
2051 && GET_CODE (op0) == AND
2052 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2053 && GET_CODE (op1) == CONST_INT
2054 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2055 return simplify_gen_binary (IOR, mode,
2056 simplify_gen_binary
2057 (AND, mode, XEXP (op0, 0),
2058 GEN_INT (INTVAL (XEXP (op0, 1))
2059 & ~INTVAL (op1))),
2060 op1);
2062 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2063 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2064 the PLUS does not affect any of the bits in OP1: then we can do
2065 the IOR as a PLUS and we can associate. This is valid if OP1
2066 can be safely shifted left C bits. */
2067 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2068 && GET_CODE (XEXP (op0, 0)) == PLUS
2069 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2070 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2071 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2073 int count = INTVAL (XEXP (op0, 1));
2074 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2076 if (mask >> count == INTVAL (trueop1)
2077 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2078 return simplify_gen_binary (ASHIFTRT, mode,
2079 plus_constant (XEXP (op0, 0), mask),
2080 XEXP (op0, 1));
2083 tem = simplify_associative_operation (code, mode, op0, op1);
2084 if (tem)
2085 return tem;
2086 break;
2088 case XOR:
2089 if (trueop1 == const0_rtx)
2090 return op0;
2091 if (GET_CODE (trueop1) == CONST_INT
2092 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2093 == GET_MODE_MASK (mode)))
2094 return simplify_gen_unary (NOT, mode, op0, mode);
2095 if (rtx_equal_p (trueop0, trueop1)
2096 && ! side_effects_p (op0)
2097 && GET_MODE_CLASS (mode) != MODE_CC)
2098 return CONST0_RTX (mode);
2100 /* Canonicalize XOR of the most significant bit to PLUS. */
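/* For example, in QImode (xor X (const_int -128)) becomes
(plus X (const_int -128)); flipping the sign bit and adding it are
equivalent, since any carry out of the sign bit is discarded.  */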
2101 if ((GET_CODE (op1) == CONST_INT
2102 || GET_CODE (op1) == CONST_DOUBLE)
2103 && mode_signbit_p (mode, op1))
2104 return simplify_gen_binary (PLUS, mode, op0, op1);
2105 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2106 if ((GET_CODE (op1) == CONST_INT
2107 || GET_CODE (op1) == CONST_DOUBLE)
2108 && GET_CODE (op0) == PLUS
2109 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2110 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2111 && mode_signbit_p (mode, XEXP (op0, 1)))
2112 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2113 simplify_gen_binary (XOR, mode, op1,
2114 XEXP (op0, 1)));
2116 /* If we are XORing two things that have no bits in common,
2117 convert them into an IOR. This helps to detect rotation encoded
2118 using those methods and possibly other simplifications. */
2120 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2121 && (nonzero_bits (op0, mode)
2122 & nonzero_bits (op1, mode)) == 0)
2123 return (simplify_gen_binary (IOR, mode, op0, op1));
2125 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2126 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2127 (NOT y). */
2129 int num_negated = 0;
2131 if (GET_CODE (op0) == NOT)
2132 num_negated++, op0 = XEXP (op0, 0);
2133 if (GET_CODE (op1) == NOT)
2134 num_negated++, op1 = XEXP (op1, 0);
2136 if (num_negated == 2)
2137 return simplify_gen_binary (XOR, mode, op0, op1);
2138 else if (num_negated == 1)
2139 return simplify_gen_unary (NOT, mode,
2140 simplify_gen_binary (XOR, mode, op0, op1),
2141 mode);
2144 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2145 correspond to a machine insn or result in further simplifications
2146 if B is a constant. */
2148 if (GET_CODE (op0) == AND
2149 && rtx_equal_p (XEXP (op0, 1), op1)
2150 && ! side_effects_p (op1))
2151 return simplify_gen_binary (AND, mode,
2152 simplify_gen_unary (NOT, mode,
2153 XEXP (op0, 0), mode),
2154 op1);
2156 else if (GET_CODE (op0) == AND
2157 && rtx_equal_p (XEXP (op0, 0), op1)
2158 && ! side_effects_p (op1))
2159 return simplify_gen_binary (AND, mode,
2160 simplify_gen_unary (NOT, mode,
2161 XEXP (op0, 1), mode),
2162 op1);
2164 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2165 comparison if STORE_FLAG_VALUE is 1. */
2166 if (STORE_FLAG_VALUE == 1
2167 && trueop1 == const1_rtx
2168 && COMPARISON_P (op0)
2169 && (reversed = reversed_comparison (op0, mode)))
2170 return reversed;
2172 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2173 is (lt foo (const_int 0)), so we can perform the above
2174 simplification if STORE_FLAG_VALUE is 1. */
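/* For example, assuming a 32-bit mode, (xor (lshiftrt X (const_int 31))
(const_int 1)) becomes (ge X (const_int 0)).  */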
2176 if (STORE_FLAG_VALUE == 1
2177 && trueop1 == const1_rtx
2178 && GET_CODE (op0) == LSHIFTRT
2179 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2180 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2181 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2183 /* (xor (comparison foo bar) (const_int sign-bit))
2184 when STORE_FLAG_VALUE is the sign bit. */
2185 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2186 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2187 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2188 && trueop1 == const_true_rtx
2189 && COMPARISON_P (op0)
2190 && (reversed = reversed_comparison (op0, mode)))
2191 return reversed;
2193 break;
2195 tem = simplify_associative_operation (code, mode, op0, op1);
2196 if (tem)
2197 return tem;
2198 break;
2200 case AND:
2201 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2202 return trueop1;
2203 /* If we are turning off bits already known off in OP0, we need
2204 not do an AND. */
2205 if (GET_CODE (trueop1) == CONST_INT
2206 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2207 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2208 return op0;
2209 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2210 && GET_MODE_CLASS (mode) != MODE_CC)
2211 return op0;
2212 /* A & (~A) -> 0 */
2213 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2214 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2215 && ! side_effects_p (op0)
2216 && GET_MODE_CLASS (mode) != MODE_CC)
2217 return CONST0_RTX (mode);
2219 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2220 there are no nonzero bits of C outside of X's mode. */
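/* For example, with X in QImode, (and (sign_extend:SI X) (const_int 127))
becomes (zero_extend:SI (and:QI X (const_int 127))).  */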
2221 if ((GET_CODE (op0) == SIGN_EXTEND
2222 || GET_CODE (op0) == ZERO_EXTEND)
2223 && GET_CODE (trueop1) == CONST_INT
2224 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2225 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2226 & INTVAL (trueop1)) == 0)
2228 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2229 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2230 gen_int_mode (INTVAL (trueop1),
2231 imode));
2232 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2235 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2236 insn (and may simplify more). */
2237 if (GET_CODE (op0) == XOR
2238 && rtx_equal_p (XEXP (op0, 0), op1)
2239 && ! side_effects_p (op1))
2240 return simplify_gen_binary (AND, mode,
2241 simplify_gen_unary (NOT, mode,
2242 XEXP (op0, 1), mode),
2243 op1);
2245 if (GET_CODE (op0) == XOR
2246 && rtx_equal_p (XEXP (op0, 1), op1)
2247 && ! side_effects_p (op1))
2248 return simplify_gen_binary (AND, mode,
2249 simplify_gen_unary (NOT, mode,
2250 XEXP (op0, 0), mode),
2251 op1);
2253 /* Similarly for (~(A ^ B)) & A. */
2254 if (GET_CODE (op0) == NOT
2255 && GET_CODE (XEXP (op0, 0)) == XOR
2256 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2260 if (GET_CODE (op0) == NOT
2261 && GET_CODE (XEXP (op0, 0)) == XOR
2262 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2263 && ! side_effects_p (op1))
2264 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2266 /* Convert (A | B) & A to A. */
2267 if (GET_CODE (op0) == IOR
2268 && (rtx_equal_p (XEXP (op0, 0), op1)
2269 || rtx_equal_p (XEXP (op0, 1), op1))
2270 && ! side_effects_p (XEXP (op0, 0))
2271 && ! side_effects_p (XEXP (op0, 1)))
2272 return op1;
2274 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2275 ((A & N) + B) & M -> (A + B) & M
2276 Similarly if (N & M) == 0,
2277 ((A | N) + B) & M -> (A + B) & M
2278 and for - instead of + and/or ^ instead of |. */
2279 if (GET_CODE (trueop1) == CONST_INT
2280 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2281 && ~INTVAL (trueop1)
2282 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2283 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2285 rtx pmop[2];
2286 int which;
2288 pmop[0] = XEXP (op0, 0);
2289 pmop[1] = XEXP (op0, 1);
2291 for (which = 0; which < 2; which++)
2293 tem = pmop[which];
2294 switch (GET_CODE (tem))
2296 case AND:
2297 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2298 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2299 == INTVAL (trueop1))
2300 pmop[which] = XEXP (tem, 0);
2301 break;
2302 case IOR:
2303 case XOR:
2304 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2305 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2306 pmop[which] = XEXP (tem, 0);
2307 break;
2308 default:
2309 break;
2313 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2315 tem = simplify_gen_binary (GET_CODE (op0), mode,
2316 pmop[0], pmop[1]);
2317 return simplify_gen_binary (code, mode, tem, op1);
2320 tem = simplify_associative_operation (code, mode, op0, op1);
2321 if (tem)
2322 return tem;
2323 break;
2325 case UDIV:
2326 /* 0/x is 0 (or x&0 if x has side-effects). */
2327 if (trueop0 == CONST0_RTX (mode))
2329 if (side_effects_p (op1))
2330 return simplify_gen_binary (AND, mode, op1, trueop0);
2331 return trueop0;
2333 /* x/1 is x. */
2334 if (trueop1 == CONST1_RTX (mode))
2335 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2336 /* Convert divide by power of two into shift. */
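/* For example, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */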
2337 if (GET_CODE (trueop1) == CONST_INT
2338 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2339 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2340 break;
2342 case DIV:
2343 /* Handle floating point and integers separately. */
2344 if (SCALAR_FLOAT_MODE_P (mode))
2346 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2347 safe for modes with NaNs, since 0.0 / 0.0 will then be
2348 NaN rather than 0.0. Nor is it safe for modes with signed
2349 zeros, since dividing 0 by a negative number gives -0.0 */
2350 if (trueop0 == CONST0_RTX (mode)
2351 && !HONOR_NANS (mode)
2352 && !HONOR_SIGNED_ZEROS (mode)
2353 && ! side_effects_p (op1))
2354 return op0;
2355 /* x/1.0 is x. */
2356 if (trueop1 == CONST1_RTX (mode)
2357 && !HONOR_SNANS (mode))
2358 return op0;
2360 if (GET_CODE (trueop1) == CONST_DOUBLE
2361 && trueop1 != CONST0_RTX (mode))
2363 REAL_VALUE_TYPE d;
2364 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2366 /* x/-1.0 is -x. */
2367 if (REAL_VALUES_EQUAL (d, dconstm1)
2368 && !HONOR_SNANS (mode))
2369 return simplify_gen_unary (NEG, mode, op0, mode);
2371 /* Change FP division by a constant into multiplication.
2372 Only do this with -funsafe-math-optimizations. */
2373 if (flag_unsafe_math_optimizations
2374 && !REAL_VALUES_EQUAL (d, dconst0))
2376 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2377 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2378 return simplify_gen_binary (MULT, mode, op0, tem);
2382 else
2384 /* 0/x is 0 (or x&0 if x has side-effects). */
2385 if (trueop0 == CONST0_RTX (mode))
2387 if (side_effects_p (op1))
2388 return simplify_gen_binary (AND, mode, op1, trueop0);
2389 return trueop0;
2391 /* x/1 is x. */
2392 if (trueop1 == CONST1_RTX (mode))
2393 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2394 /* x/-1 is -x. */
2395 if (trueop1 == constm1_rtx)
2397 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2398 return simplify_gen_unary (NEG, mode, x, mode);
2401 break;
2403 case UMOD:
2404 /* 0%x is 0 (or x&0 if x has side-effects). */
2405 if (trueop0 == CONST0_RTX (mode))
2407 if (side_effects_p (op1))
2408 return simplify_gen_binary (AND, mode, op1, trueop0);
2409 return trueop0;
2411 /* x%1 is 0 (or x&0 if x has side-effects). */
2412 if (trueop1 == CONST1_RTX (mode))
2414 if (side_effects_p (op0))
2415 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2416 return CONST0_RTX (mode);
2418 /* Implement modulus by power of two as AND. */
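/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */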
2419 if (GET_CODE (trueop1) == CONST_INT
2420 && exact_log2 (INTVAL (trueop1)) > 0)
2421 return simplify_gen_binary (AND, mode, op0,
2422 GEN_INT (INTVAL (op1) - 1));
2423 break;
2425 case MOD:
2426 /* 0%x is 0 (or x&0 if x has side-effects). */
2427 if (trueop0 == CONST0_RTX (mode))
2429 if (side_effects_p (op1))
2430 return simplify_gen_binary (AND, mode, op1, trueop0);
2431 return trueop0;
2433 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2434 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2436 if (side_effects_p (op0))
2437 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2438 return CONST0_RTX (mode);
2440 break;
2442 case ROTATERT:
2443 case ROTATE:
2444 case ASHIFTRT:
2445 if (trueop1 == CONST0_RTX (mode))
2446 return op0;
2447 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2448 return op0;
2449 /* Rotating ~0 always results in ~0. */
2450 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2451 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2452 && ! side_effects_p (op1))
2453 return op0;
2454 break;
2456 case ASHIFT:
2457 case SS_ASHIFT:
2458 if (trueop1 == CONST0_RTX (mode))
2459 return op0;
2460 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2461 return op0;
2462 break;
2464 case LSHIFTRT:
2465 if (trueop1 == CONST0_RTX (mode))
2466 return op0;
2467 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2468 return op0;
2469 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2470 if (GET_CODE (op0) == CLZ
2471 && GET_CODE (trueop1) == CONST_INT
2472 && STORE_FLAG_VALUE == 1
2473 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2475 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2476 unsigned HOST_WIDE_INT zero_val = 0;
2478 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2479 && zero_val == GET_MODE_BITSIZE (imode)
2480 && INTVAL (trueop1) == exact_log2 (zero_val))
2481 return simplify_gen_relational (EQ, mode, imode,
2482 XEXP (op0, 0), const0_rtx);
2484 break;
2486 case SMIN:
2487 if (width <= HOST_BITS_PER_WIDE_INT
2488 && GET_CODE (trueop1) == CONST_INT
2489 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2490 && ! side_effects_p (op0))
2491 return op1;
2492 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2493 return op0;
2494 tem = simplify_associative_operation (code, mode, op0, op1);
2495 if (tem)
2496 return tem;
2497 break;
2499 case SMAX:
2500 if (width <= HOST_BITS_PER_WIDE_INT
2501 && GET_CODE (trueop1) == CONST_INT
2502 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2503 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2504 && ! side_effects_p (op0))
2505 return op1;
2506 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2507 return op0;
2508 tem = simplify_associative_operation (code, mode, op0, op1);
2509 if (tem)
2510 return tem;
2511 break;
2513 case UMIN:
2514 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2515 return op1;
2516 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2517 return op0;
2518 tem = simplify_associative_operation (code, mode, op0, op1);
2519 if (tem)
2520 return tem;
2521 break;
2523 case UMAX:
2524 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2525 return op1;
2526 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2527 return op0;
2528 tem = simplify_associative_operation (code, mode, op0, op1);
2529 if (tem)
2530 return tem;
2531 break;
2533 case SS_PLUS:
2534 case US_PLUS:
2535 case SS_MINUS:
2536 case US_MINUS:
2537 /* ??? There are simplifications that can be done. */
2538 return 0;
2540 case VEC_SELECT:
2541 if (!VECTOR_MODE_P (mode))
2543 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2544 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2545 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2546 gcc_assert (XVECLEN (trueop1, 0) == 1);
2547 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2549 if (GET_CODE (trueop0) == CONST_VECTOR)
2550 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2551 (trueop1, 0, 0)));
2553 else
2555 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2556 gcc_assert (GET_MODE_INNER (mode)
2557 == GET_MODE_INNER (GET_MODE (trueop0)));
2558 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2560 if (GET_CODE (trueop0) == CONST_VECTOR)
2562 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2563 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2564 rtvec v = rtvec_alloc (n_elts);
2565 unsigned int i;
2567 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2568 for (i = 0; i < n_elts; i++)
2570 rtx x = XVECEXP (trueop1, 0, i);
2572 gcc_assert (GET_CODE (x) == CONST_INT);
2573 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2574 INTVAL (x));
2577 return gen_rtx_CONST_VECTOR (mode, v);
2581 if (XVECLEN (trueop1, 0) == 1
2582 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2583 && GET_CODE (trueop0) == VEC_CONCAT)
2585 rtx vec = trueop0;
2586 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2588 /* Try to find the element in the VEC_CONCAT. */
2589 while (GET_MODE (vec) != mode
2590 && GET_CODE (vec) == VEC_CONCAT)
2592 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2593 if (offset < vec_size)
2594 vec = XEXP (vec, 0);
2595 else
2597 offset -= vec_size;
2598 vec = XEXP (vec, 1);
2600 vec = avoid_constant_pool_reference (vec);
2603 if (GET_MODE (vec) == mode)
2604 return vec;
2607 return 0;
2608 case VEC_CONCAT:
2610 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2611 ? GET_MODE (trueop0)
2612 : GET_MODE_INNER (mode));
2613 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2614 ? GET_MODE (trueop1)
2615 : GET_MODE_INNER (mode));
2617 gcc_assert (VECTOR_MODE_P (mode));
2618 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2619 == GET_MODE_SIZE (mode));
2621 if (VECTOR_MODE_P (op0_mode))
2622 gcc_assert (GET_MODE_INNER (mode)
2623 == GET_MODE_INNER (op0_mode));
2624 else
2625 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2627 if (VECTOR_MODE_P (op1_mode))
2628 gcc_assert (GET_MODE_INNER (mode)
2629 == GET_MODE_INNER (op1_mode));
2630 else
2631 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2633 if ((GET_CODE (trueop0) == CONST_VECTOR
2634 || GET_CODE (trueop0) == CONST_INT
2635 || GET_CODE (trueop0) == CONST_DOUBLE)
2636 && (GET_CODE (trueop1) == CONST_VECTOR
2637 || GET_CODE (trueop1) == CONST_INT
2638 || GET_CODE (trueop1) == CONST_DOUBLE))
2640 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2641 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2642 rtvec v = rtvec_alloc (n_elts);
2643 unsigned int i;
2644 unsigned in_n_elts = 1;
2646 if (VECTOR_MODE_P (op0_mode))
2647 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2648 for (i = 0; i < n_elts; i++)
2650 if (i < in_n_elts)
2652 if (!VECTOR_MODE_P (op0_mode))
2653 RTVEC_ELT (v, i) = trueop0;
2654 else
2655 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2657 else
2659 if (!VECTOR_MODE_P (op1_mode))
2660 RTVEC_ELT (v, i) = trueop1;
2661 else
2662 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2663 i - in_n_elts);
2667 return gen_rtx_CONST_VECTOR (mode, v);
2670 return 0;
2672 default:
2673 gcc_unreachable ();
2676 return 0;
2680 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2681 rtx op0, rtx op1)
2683 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2684 HOST_WIDE_INT val;
2685 unsigned int width = GET_MODE_BITSIZE (mode);
2687 if (VECTOR_MODE_P (mode)
2688 && code != VEC_CONCAT
2689 && GET_CODE (op0) == CONST_VECTOR
2690 && GET_CODE (op1) == CONST_VECTOR)
2692 unsigned n_elts = GET_MODE_NUNITS (mode);
2693 enum machine_mode op0mode = GET_MODE (op0);
2694 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2695 enum machine_mode op1mode = GET_MODE (op1);
2696 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2697 rtvec v = rtvec_alloc (n_elts);
2698 unsigned int i;
2700 gcc_assert (op0_n_elts == n_elts);
2701 gcc_assert (op1_n_elts == n_elts);
2702 for (i = 0; i < n_elts; i++)
2704 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2705 CONST_VECTOR_ELT (op0, i),
2706 CONST_VECTOR_ELT (op1, i));
2707 if (!x)
2708 return 0;
2709 RTVEC_ELT (v, i) = x;
2712 return gen_rtx_CONST_VECTOR (mode, v);
2715 if (VECTOR_MODE_P (mode)
2716 && code == VEC_CONCAT
2717 && CONSTANT_P (op0) && CONSTANT_P (op1))
2719 unsigned n_elts = GET_MODE_NUNITS (mode);
2720 rtvec v = rtvec_alloc (n_elts);
2722 gcc_assert (n_elts >= 2);
2723 if (n_elts == 2)
2725 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2726 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2728 RTVEC_ELT (v, 0) = op0;
2729 RTVEC_ELT (v, 1) = op1;
2731 else
2733 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2734 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2735 unsigned i;
2737 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2738 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2739 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2741 for (i = 0; i < op0_n_elts; ++i)
2742 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2743 for (i = 0; i < op1_n_elts; ++i)
2744 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2747 return gen_rtx_CONST_VECTOR (mode, v);
2750 if (SCALAR_FLOAT_MODE_P (mode)
2751 && GET_CODE (op0) == CONST_DOUBLE
2752 && GET_CODE (op1) == CONST_DOUBLE
2753 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2755 if (code == AND
2756 || code == IOR
2757 || code == XOR)
2759 long tmp0[4];
2760 long tmp1[4];
2761 REAL_VALUE_TYPE r;
2762 int i;
2764 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2765 GET_MODE (op0));
2766 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2767 GET_MODE (op1));
2768 for (i = 0; i < 4; i++)
2770 switch (code)
2772 case AND:
2773 tmp0[i] &= tmp1[i];
2774 break;
2775 case IOR:
2776 tmp0[i] |= tmp1[i];
2777 break;
2778 case XOR:
2779 tmp0[i] ^= tmp1[i];
2780 break;
2781 default:
2782 gcc_unreachable ();
2785 real_from_target (&r, tmp0, mode);
2786 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2788 else
2790 REAL_VALUE_TYPE f0, f1, value, result;
2791 bool inexact;
2793 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2794 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2795 real_convert (&f0, mode, &f0);
2796 real_convert (&f1, mode, &f1);
2798 if (HONOR_SNANS (mode)
2799 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2800 return 0;
2802 if (code == DIV
2803 && REAL_VALUES_EQUAL (f1, dconst0)
2804 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2805 return 0;
2807 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2808 && flag_trapping_math
2809 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2811 int s0 = REAL_VALUE_NEGATIVE (f0);
2812 int s1 = REAL_VALUE_NEGATIVE (f1);
2814 switch (code)
2816 case PLUS:
2817 /* Inf + -Inf = NaN plus exception. */
2818 if (s0 != s1)
2819 return 0;
2820 break;
2821 case MINUS:
2822 /* Inf - Inf = NaN plus exception. */
2823 if (s0 == s1)
2824 return 0;
2825 break;
2826 case DIV:
2827 /* Inf / Inf = NaN plus exception. */
2828 return 0;
2829 default:
2830 break;
2834 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2835 && flag_trapping_math
2836 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2837 || (REAL_VALUE_ISINF (f1)
2838 && REAL_VALUES_EQUAL (f0, dconst0))))
2839 /* Inf * 0 = NaN plus exception. */
2840 return 0;
2842 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2843 &f0, &f1);
2844 real_convert (&result, mode, &value);
2846 /* Don't constant fold this floating point operation if
2847 the result has overflowed and flag_trapping_math is set. */
2849 if (flag_trapping_math
2850 && MODE_HAS_INFINITIES (mode)
2851 && REAL_VALUE_ISINF (result)
2852 && !REAL_VALUE_ISINF (f0)
2853 && !REAL_VALUE_ISINF (f1))
2854 /* Overflow plus exception. */
2855 return 0;
2857 /* Don't constant fold this floating point operation if the
2858 result may depend upon the run-time rounding mode and
2859 flag_rounding_math is set, or if GCC's software emulation
2860 is unable to accurately represent the result. */
2862 if ((flag_rounding_math
2863 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2864 && !flag_unsafe_math_optimizations))
2865 && (inexact || !real_identical (&result, &value)))
2866 return NULL_RTX;
2868 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2872 /* We can fold some multi-word operations. */
2873 if (GET_MODE_CLASS (mode) == MODE_INT
2874 && width == HOST_BITS_PER_WIDE_INT * 2
2875 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2876 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2878 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2879 HOST_WIDE_INT h1, h2, hv, ht;
2881 if (GET_CODE (op0) == CONST_DOUBLE)
2882 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2883 else
2884 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2886 if (GET_CODE (op1) == CONST_DOUBLE)
2887 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2888 else
2889 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2891 switch (code)
2893 case MINUS:
2894 /* A - B == A + (-B). */
2895 neg_double (l2, h2, &lv, &hv);
2896 l2 = lv, h2 = hv;
2898 /* Fall through.... */
2900 case PLUS:
2901 add_double (l1, h1, l2, h2, &lv, &hv);
2902 break;
2904 case MULT:
2905 mul_double (l1, h1, l2, h2, &lv, &hv);
2906 break;
2908 case DIV:
2909 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2910 &lv, &hv, &lt, &ht))
2911 return 0;
2912 break;
2914 case MOD:
2915 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2916 &lt, &ht, &lv, &hv))
2917 return 0;
2918 break;
2920 case UDIV:
2921 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2922 &lv, &hv, &lt, &ht))
2923 return 0;
2924 break;
2926 case UMOD:
2927 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2928 &lt, &ht, &lv, &hv))
2929 return 0;
2930 break;
2932 case AND:
2933 lv = l1 & l2, hv = h1 & h2;
2934 break;
2936 case IOR:
2937 lv = l1 | l2, hv = h1 | h2;
2938 break;
2940 case XOR:
2941 lv = l1 ^ l2, hv = h1 ^ h2;
2942 break;
2944 case SMIN:
2945 if (h1 < h2
2946 || (h1 == h2
2947 && ((unsigned HOST_WIDE_INT) l1
2948 < (unsigned HOST_WIDE_INT) l2)))
2949 lv = l1, hv = h1;
2950 else
2951 lv = l2, hv = h2;
2952 break;
2954 case SMAX:
2955 if (h1 > h2
2956 || (h1 == h2
2957 && ((unsigned HOST_WIDE_INT) l1
2958 > (unsigned HOST_WIDE_INT) l2)))
2959 lv = l1, hv = h1;
2960 else
2961 lv = l2, hv = h2;
2962 break;
2964 case UMIN:
2965 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2966 || (h1 == h2
2967 && ((unsigned HOST_WIDE_INT) l1
2968 < (unsigned HOST_WIDE_INT) l2)))
2969 lv = l1, hv = h1;
2970 else
2971 lv = l2, hv = h2;
2972 break;
2974 case UMAX:
2975 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2976 || (h1 == h2
2977 && ((unsigned HOST_WIDE_INT) l1
2978 > (unsigned HOST_WIDE_INT) l2)))
2979 lv = l1, hv = h1;
2980 else
2981 lv = l2, hv = h2;
2982 break;
2984 case LSHIFTRT: case ASHIFTRT:
2985 case ASHIFT:
2986 case ROTATE: case ROTATERT:
2987 if (SHIFT_COUNT_TRUNCATED)
2988 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2990 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2991 return 0;
2993 if (code == LSHIFTRT || code == ASHIFTRT)
2994 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2995 code == ASHIFTRT);
2996 else if (code == ASHIFT)
2997 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2998 else if (code == ROTATE)
2999 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3000 else /* code == ROTATERT */
3001 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3002 break;
3004 default:
3005 return 0;
3008 return immed_double_const (lv, hv, mode);
3011 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3012 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3014 /* Get the integer argument values in two forms:
3015 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3017 arg0 = INTVAL (op0);
3018 arg1 = INTVAL (op1);
3020 if (width < HOST_BITS_PER_WIDE_INT)
3022 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3023 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3025 arg0s = arg0;
3026 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3027 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3029 arg1s = arg1;
3030 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3031 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3033 else
3035 arg0s = arg0;
3036 arg1s = arg1;
3039 /* Compute the value of the arithmetic. */
3041 switch (code)
3043 case PLUS:
3044 val = arg0s + arg1s;
3045 break;
3047 case MINUS:
3048 val = arg0s - arg1s;
3049 break;
3051 case MULT:
3052 val = arg0s * arg1s;
3053 break;
3055 case DIV:
3056 if (arg1s == 0
3057 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3058 && arg1s == -1))
3059 return 0;
3060 val = arg0s / arg1s;
3061 break;
3063 case MOD:
3064 if (arg1s == 0
3065 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3066 && arg1s == -1))
3067 return 0;
3068 val = arg0s % arg1s;
3069 break;
3071 case UDIV:
3072 if (arg1 == 0
3073 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3074 && arg1s == -1))
3075 return 0;
3076 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3077 break;
3079 case UMOD:
3080 if (arg1 == 0
3081 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3082 && arg1s == -1))
3083 return 0;
3084 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3085 break;
3087 case AND:
3088 val = arg0 & arg1;
3089 break;
3091 case IOR:
3092 val = arg0 | arg1;
3093 break;
3095 case XOR:
3096 val = arg0 ^ arg1;
3097 break;
3099 case LSHIFTRT:
3100 case ASHIFT:
3101 case ASHIFTRT:
3102 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3103 the value is in range. We can't return any old value for
3104 out-of-range arguments because either the middle-end (via
3105 shift_truncation_mask) or the back-end might be relying on
3106 target-specific knowledge. Nor can we rely on
3107 shift_truncation_mask, since the shift might not be part of an
3108 ashlM3, lshrM3 or ashrM3 instruction. */
3109 if (SHIFT_COUNT_TRUNCATED)
3110 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3111 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3112 return 0;
3114 val = (code == ASHIFT
3115 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3116 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3118 /* Sign-extend the result for arithmetic right shifts. */
3119 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3120 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3121 break;
3123 case ROTATERT:
3124 if (arg1 < 0)
3125 return 0;
3127 arg1 %= width;
3128 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3129 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3130 break;
3132 case ROTATE:
3133 if (arg1 < 0)
3134 return 0;
3136 arg1 %= width;
3137 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3138 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3139 break;
3141 case COMPARE:
3142 /* Do nothing here. */
3143 return 0;
3145 case SMIN:
3146 val = arg0s <= arg1s ? arg0s : arg1s;
3147 break;
3149 case UMIN:
3150 val = ((unsigned HOST_WIDE_INT) arg0
3151 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3152 break;
3154 case SMAX:
3155 val = arg0s > arg1s ? arg0s : arg1s;
3156 break;
3158 case UMAX:
3159 val = ((unsigned HOST_WIDE_INT) arg0
3160 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3161 break;
3163 case SS_PLUS:
3164 case US_PLUS:
3165 case SS_MINUS:
3166 case US_MINUS:
3167 case SS_ASHIFT:
3168 /* ??? There are simplifications that can be done. */
3169 return 0;
3171 default:
3172 gcc_unreachable ();
3175 return gen_int_mode (val, mode);
3178 return NULL_RTX;
3183 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3184 PLUS or MINUS.
3186 Rather than test for specific cases, we do this by a brute-force method
3187 and do all possible simplifications until no more changes occur. Then
3188 we rebuild the operation. */
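/* One term of the sum being simplified: the operand itself, and a flag
that is nonzero when the operand is to be subtracted rather than added.  */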
3190 struct simplify_plus_minus_op_data
3192 rtx op;
3193 short neg;
3196 static int
3197 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3199 const struct simplify_plus_minus_op_data *d1 = p1;
3200 const struct simplify_plus_minus_op_data *d2 = p2;
3201 int result;
3203 result = (commutative_operand_precedence (d2->op)
3204 - commutative_operand_precedence (d1->op));
3205 if (result)
3206 return result;
3208 /* Group together equal REGs to do more simplification. */
3209 if (REG_P (d1->op) && REG_P (d2->op))
3210 return REGNO (d1->op) - REGNO (d2->op);
3211 else
3212 return 0;
3215 static rtx
3216 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3217 rtx op1)
3219 struct simplify_plus_minus_op_data ops[8];
3220 rtx result, tem;
3221 int n_ops = 2, input_ops = 2;
3222 int changed, n_constants = 0, canonicalized = 0;
3223 int i, j;
3225 memset (ops, 0, sizeof ops);
3227 /* Set up the two operands and then expand them until nothing has been
3228 changed. If we run out of room in our array, give up; this should
3229 almost never happen. */
3231 ops[0].op = op0;
3232 ops[0].neg = 0;
3233 ops[1].op = op1;
3234 ops[1].neg = (code == MINUS);
3238 changed = 0;
3240 for (i = 0; i < n_ops; i++)
3242 rtx this_op = ops[i].op;
3243 int this_neg = ops[i].neg;
3244 enum rtx_code this_code = GET_CODE (this_op);
3246 switch (this_code)
3248 case PLUS:
3249 case MINUS:
3250 if (n_ops == 7)
3251 return NULL_RTX;
3253 ops[n_ops].op = XEXP (this_op, 1);
3254 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3255 n_ops++;
3257 ops[i].op = XEXP (this_op, 0);
3258 input_ops++;
3259 changed = 1;
3260 canonicalized |= this_neg;
3261 break;
3263 case NEG:
3264 ops[i].op = XEXP (this_op, 0);
3265 ops[i].neg = ! this_neg;
3266 changed = 1;
3267 canonicalized = 1;
3268 break;
3270 case CONST:
3271 if (n_ops < 7
3272 && GET_CODE (XEXP (this_op, 0)) == PLUS
3273 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3274 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3276 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3277 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3278 ops[n_ops].neg = this_neg;
3279 n_ops++;
3280 changed = 1;
3281 canonicalized = 1;
3283 break;
3285 case NOT:
3286 /* ~a -> (-a - 1) */
3287 if (n_ops != 7)
3289 ops[n_ops].op = constm1_rtx;
3290 ops[n_ops++].neg = this_neg;
3291 ops[i].op = XEXP (this_op, 0);
3292 ops[i].neg = !this_neg;
3293 changed = 1;
3294 canonicalized = 1;
3296 break;
3298 case CONST_INT:
3299 n_constants++;
3300 if (this_neg)
3302 ops[i].op = neg_const_int (mode, this_op);
3303 ops[i].neg = 0;
3304 changed = 1;
3305 canonicalized = 1;
3307 break;
3309 default:
3310 break;
3314 while (changed);
3316 if (n_constants > 1)
3317 canonicalized = 1;
3319 gcc_assert (n_ops >= 2);
3321 /* If we only have two operands, we can avoid the loops. */
3322 if (n_ops == 2)
3324 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3325 rtx lhs, rhs;
3327 /* Get the two operands. Be careful with the order, especially for
3328 the cases where code == MINUS. */
3329 if (ops[0].neg && ops[1].neg)
3331 lhs = gen_rtx_NEG (mode, ops[0].op);
3332 rhs = ops[1].op;
3334 else if (ops[0].neg)
3336 lhs = ops[1].op;
3337 rhs = ops[0].op;
3339 else
3341 lhs = ops[0].op;
3342 rhs = ops[1].op;
3345 return simplify_const_binary_operation (code, mode, lhs, rhs);
3348 /* Now simplify each pair of operands until nothing changes. */
3351 /* Insertion sort is good enough for an eight-element array. */
3352 for (i = 1; i < n_ops; i++)
3354 struct simplify_plus_minus_op_data save;
3355 j = i - 1;
3356 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3357 continue;
3359 canonicalized = 1;
3360 save = ops[i];
3362 ops[j + 1] = ops[j];
3363 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3364 ops[j + 1] = save;
3367 /* This is only useful the first time through. */
3368 if (!canonicalized)
3369 return NULL_RTX;
3371 changed = 0;
3372 for (i = n_ops - 1; i > 0; i--)
3373 for (j = i - 1; j >= 0; j--)
3375 rtx lhs = ops[j].op, rhs = ops[i].op;
3376 int lneg = ops[j].neg, rneg = ops[i].neg;
3378 if (lhs != 0 && rhs != 0)
3380 enum rtx_code ncode = PLUS;
3382 if (lneg != rneg)
3384 ncode = MINUS;
3385 if (lneg)
3386 tem = lhs, lhs = rhs, rhs = tem;
3388 else if (swap_commutative_operands_p (lhs, rhs))
3389 tem = lhs, lhs = rhs, rhs = tem;
3391 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3392 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3394 rtx tem_lhs, tem_rhs;
3396 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3397 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3398 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3400 if (tem && !CONSTANT_P (tem))
3401 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3403 else
3404 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3406 /* Reject "simplifications" that just wrap the two
3407 arguments in a CONST. Failure to do so can result
3408 in infinite recursion with simplify_binary_operation
3409 when it calls us to simplify CONST operations. */
3410 if (tem
3411 && ! (GET_CODE (tem) == CONST
3412 && GET_CODE (XEXP (tem, 0)) == ncode
3413 && XEXP (XEXP (tem, 0), 0) == lhs
3414 && XEXP (XEXP (tem, 0), 1) == rhs))
3416 lneg &= rneg;
3417 if (GET_CODE (tem) == NEG)
3418 tem = XEXP (tem, 0), lneg = !lneg;
3419 if (GET_CODE (tem) == CONST_INT && lneg)
3420 tem = neg_const_int (mode, tem), lneg = 0;
3422 ops[i].op = tem;
3423 ops[i].neg = lneg;
3424 ops[j].op = NULL_RTX;
3425 changed = 1;
3430 /* Pack all the operands to the lower-numbered entries. */
3431 for (i = 0, j = 0; j < n_ops; j++)
3432 if (ops[j].op)
3434 ops[i] = ops[j];
3435 i++;
3437 n_ops = i;
3439 while (changed);
3441 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3442 if (n_ops == 2
3443 && GET_CODE (ops[1].op) == CONST_INT
3444 && CONSTANT_P (ops[0].op)
3445 && ops[0].neg)
3446 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3448 /* We suppressed creation of trivial CONST expressions in the
3449 combination loop to avoid recursion. Create one manually now.
3450 The combination loop should have ensured that there is exactly
3451 one CONST_INT, and the sort will have ensured that it is last
3452 in the array and that any other constant will be next-to-last. */
3454 if (n_ops > 1
3455 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3456 && CONSTANT_P (ops[n_ops - 2].op))
3458 rtx value = ops[n_ops - 1].op;
3459 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3460 value = neg_const_int (mode, value);
3461 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3462 n_ops--;
3465 /* Put a non-negated operand first, if possible. */
3467 for (i = 0; i < n_ops && ops[i].neg; i++)
3468 continue;
3469 if (i == n_ops)
3470 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3471 else if (i != 0)
3473 tem = ops[0].op;
3474 ops[0] = ops[i];
3475 ops[i].op = tem;
3476 ops[i].neg = 1;
3479 /* Now make the result by performing the requested operations. */
3480 result = ops[0].op;
3481 for (i = 1; i < n_ops; i++)
3482 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3483 mode, result, ops[i].op);
3485 return result;
3488 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3489 static bool
3490 plus_minus_operand_p (rtx x)
3492 return GET_CODE (x) == PLUS
3493 || GET_CODE (x) == MINUS
3494 || (GET_CODE (x) == CONST
3495 && GET_CODE (XEXP (x, 0)) == PLUS
3496 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3497 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3500 /* Like simplify_binary_operation except used for relational operators.
3501 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3502 not both be VOIDmode as well.
3504 CMP_MODE specifies the mode in which the comparison is done, so it is
3505 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3506 the operands or, if both are VOIDmode, the operands are compared in
3507 "infinite precision". */
3509 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3510 enum machine_mode cmp_mode, rtx op0, rtx op1)
3512 rtx tem, trueop0, trueop1;
3514 if (cmp_mode == VOIDmode)
3515 cmp_mode = GET_MODE (op0);
3516 if (cmp_mode == VOIDmode)
3517 cmp_mode = GET_MODE (op1);
3519 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3520 if (tem)
3522 if (SCALAR_FLOAT_MODE_P (mode))
3524 if (tem == const0_rtx)
3525 return CONST0_RTX (mode);
3526 #ifdef FLOAT_STORE_FLAG_VALUE
3528 REAL_VALUE_TYPE val;
3529 val = FLOAT_STORE_FLAG_VALUE (mode);
3530 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3532 #else
3533 return NULL_RTX;
3534 #endif
3536 if (VECTOR_MODE_P (mode))
3538 if (tem == const0_rtx)
3539 return CONST0_RTX (mode);
3540 #ifdef VECTOR_STORE_FLAG_VALUE
3542 int i, units;
3543 rtvec v;
3545 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3546 if (val == NULL_RTX)
3547 return NULL_RTX;
3548 if (val == const1_rtx)
3549 return CONST1_RTX (mode);
3551 units = GET_MODE_NUNITS (mode);
3552 v = rtvec_alloc (units);
3553 for (i = 0; i < units; i++)
3554 RTVEC_ELT (v, i) = val;
3555 return gen_rtx_raw_CONST_VECTOR (mode, v);
3557 #else
3558 return NULL_RTX;
3559 #endif
3562 return tem;
3565 /* For the following tests, ensure const0_rtx is op1. */
3566 if (swap_commutative_operands_p (op0, op1)
3567 || (op0 == const0_rtx && op1 != const0_rtx))
3568 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3570 /* If op0 is a compare, extract the comparison arguments from it. */
3571 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3572 return simplify_relational_operation (code, mode, VOIDmode,
3573 XEXP (op0, 0), XEXP (op0, 1));
3575 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3576 || CC0_P (op0))
3577 return NULL_RTX;
3579 trueop0 = avoid_constant_pool_reference (op0);
3580 trueop1 = avoid_constant_pool_reference (op1);
3581 return simplify_relational_operation_1 (code, mode, cmp_mode,
3582 trueop0, trueop1);
3585 /* This part of simplify_relational_operation is only used when CMP_MODE
3586 is not in class MODE_CC (i.e. it is a real comparison).
3588 MODE is the mode of the result, while CMP_MODE specifies the mode in
3589 which the comparison is done, so it is the mode of the operands. */
3591 static rtx
3592 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3593 enum machine_mode cmp_mode, rtx op0, rtx op1)
3595 enum rtx_code op0code = GET_CODE (op0);
3597 if (GET_CODE (op1) == CONST_INT)
3599 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3601 /* If op0 is a comparison, extract the comparison arguments
3602 from it. */
3603 if (code == NE)
3605 if (GET_MODE (op0) == mode)
3606 return simplify_rtx (op0);
3607 else
3608 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3609 XEXP (op0, 0), XEXP (op0, 1));
3611 else if (code == EQ)
3613 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3614 if (new_code != UNKNOWN)
3615 return simplify_gen_relational (new_code, mode, VOIDmode,
3616 XEXP (op0, 0), XEXP (op0, 1));
3621 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
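/* For example, (eq (plus X (const_int 3)) (const_int 7)) becomes
(eq X (const_int 4)).  */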
3622 if ((code == EQ || code == NE)
3623 && (op0code == PLUS || op0code == MINUS)
3624 && CONSTANT_P (op1)
3625 && CONSTANT_P (XEXP (op0, 1))
3626 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3628 rtx x = XEXP (op0, 0);
3629 rtx c = XEXP (op0, 1);
3631 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3632 cmp_mode, op1, c);
3633 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3636 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3637 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3638 if (code == NE
3639 && op1 == const0_rtx
3640 && GET_MODE_CLASS (mode) == MODE_INT
3641 && cmp_mode != VOIDmode
3642 /* ??? Work-around BImode bugs in the ia64 backend. */
3643 && mode != BImode
3644 && cmp_mode != BImode
3645 && nonzero_bits (op0, cmp_mode) == 1
3646 && STORE_FLAG_VALUE == 1)
3647 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3648 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3649 : lowpart_subreg (mode, op0, cmp_mode);
3651 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3652 if ((code == EQ || code == NE)
3653 && op1 == const0_rtx
3654 && op0code == XOR)
3655 return simplify_gen_relational (code, mode, cmp_mode,
3656 XEXP (op0, 0), XEXP (op0, 1));
3658 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3659 if ((code == EQ || code == NE)
3660 && op0code == XOR
3661 && rtx_equal_p (XEXP (op0, 0), op1)
3662 && !side_effects_p (XEXP (op0, 0)))
3663 return simplify_gen_relational (code, mode, cmp_mode,
3664 XEXP (op0, 1), const0_rtx);
3666 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3667 if ((code == EQ || code == NE)
3668 && op0code == XOR
3669 && rtx_equal_p (XEXP (op0, 1), op1)
3670 && !side_effects_p (XEXP (op0, 1)))
3671 return simplify_gen_relational (code, mode, cmp_mode,
3672 XEXP (op0, 0), const0_rtx);
3674 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
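/* For example, (ne (xor X (const_int 5)) (const_int 12)) becomes
(ne X (const_int 9)).  */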
3675 if ((code == EQ || code == NE)
3676 && op0code == XOR
3677 && (GET_CODE (op1) == CONST_INT
3678 || GET_CODE (op1) == CONST_DOUBLE)
3679 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3680 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3681 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3682 simplify_gen_binary (XOR, cmp_mode,
3683 XEXP (op0, 1), op1));
3685 return NULL_RTX;
3688 /* Check if the given comparison (done in the given MODE) is actually a
3689 tautology or a contradiction.
3690 If no simplification is possible, this function returns zero.
3691 Otherwise, it returns either const_true_rtx or const0_rtx. */
3694 simplify_const_relational_operation (enum rtx_code code,
3695 enum machine_mode mode,
3696 rtx op0, rtx op1)
3698 int equal, op0lt, op0ltu, op1lt, op1ltu;
3699 rtx tem;
3700 rtx trueop0;
3701 rtx trueop1;
3703 gcc_assert (mode != VOIDmode
3704 || (GET_MODE (op0) == VOIDmode
3705 && GET_MODE (op1) == VOIDmode));
3707 /* If op0 is a compare, extract the comparison arguments from it. */
3708 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3710 op1 = XEXP (op0, 1);
3711 op0 = XEXP (op0, 0);
3713 if (GET_MODE (op0) != VOIDmode)
3714 mode = GET_MODE (op0);
3715 else if (GET_MODE (op1) != VOIDmode)
3716 mode = GET_MODE (op1);
3717 else
3718 return 0;
3721 /* We can't simplify MODE_CC values since we don't know what the
3722 actual comparison is. */
3723 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3724 return 0;
3726 /* Make sure the constant is second. */
3727 if (swap_commutative_operands_p (op0, op1))
3729 tem = op0, op0 = op1, op1 = tem;
3730 code = swap_condition (code);
3733 trueop0 = avoid_constant_pool_reference (op0);
3734 trueop1 = avoid_constant_pool_reference (op1);
3736 /* For integer comparisons of A and B maybe we can simplify A - B and can
3737 then simplify a comparison of that with zero. If A and B are both either
3738 a register or a CONST_INT, this can't help; testing for these cases will
3739 prevent infinite recursion here and speed things up.
3741 We can only do this for EQ and NE comparisons as otherwise we may
3742 lose or introduce overflow, which we cannot disregard as undefined, since
3743 we do not know the signedness of the operation on either the left or
3744 the right hand side of the comparison. */
3746 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3747 && (code == EQ || code == NE)
3748 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3749 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3750 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3751 /* We cannot do this if tem is a nonzero address. */
3752 && ! nonzero_address_p (tem))
3753 return simplify_const_relational_operation (signed_condition (code),
3754 mode, tem, const0_rtx);
3756 if (! HONOR_NANS (mode) && code == ORDERED)
3757 return const_true_rtx;
3759 if (! HONOR_NANS (mode) && code == UNORDERED)
3760 return const0_rtx;
3762 /* For modes without NaNs, if the two operands are equal, we know the
3763 result except if they have side-effects. */
3764 if (! HONOR_NANS (GET_MODE (trueop0))
3765 && rtx_equal_p (trueop0, trueop1)
3766 && ! side_effects_p (trueop0))
3767 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3769 /* If the operands are floating-point constants, see if we can fold
3770 the result. */
3771 else if (GET_CODE (trueop0) == CONST_DOUBLE
3772 && GET_CODE (trueop1) == CONST_DOUBLE
3773 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3775 REAL_VALUE_TYPE d0, d1;
3777 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3778 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3780 /* Comparisons are unordered iff at least one of the values is NaN. */
3781 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3782 switch (code)
3784 case UNEQ:
3785 case UNLT:
3786 case UNGT:
3787 case UNLE:
3788 case UNGE:
3789 case NE:
3790 case UNORDERED:
3791 return const_true_rtx;
3792 case EQ:
3793 case LT:
3794 case GT:
3795 case LE:
3796 case GE:
3797 case LTGT:
3798 case ORDERED:
3799 return const0_rtx;
3800 default:
3801 return 0;
3804 equal = REAL_VALUES_EQUAL (d0, d1);
3805 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3806 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3809 /* Otherwise, see if the operands are both integers. */
3810 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3811 && (GET_CODE (trueop0) == CONST_DOUBLE
3812 || GET_CODE (trueop0) == CONST_INT)
3813 && (GET_CODE (trueop1) == CONST_DOUBLE
3814 || GET_CODE (trueop1) == CONST_INT))
3816 int width = GET_MODE_BITSIZE (mode);
3817 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3818 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3820 /* Get the two words comprising each integer constant. */
3821 if (GET_CODE (trueop0) == CONST_DOUBLE)
3823 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3824 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3826 else
3828 l0u = l0s = INTVAL (trueop0);
3829 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3832 if (GET_CODE (trueop1) == CONST_DOUBLE)
3834 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3835 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3837 else
3839 l1u = l1s = INTVAL (trueop1);
3840 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3843 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3844 we have to sign or zero-extend the values. */
3845 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3847 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3848 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3850 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3851 l0s |= ((HOST_WIDE_INT) (-1) << width);
3853 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3854 l1s |= ((HOST_WIDE_INT) (-1) << width);
3856 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3857 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3859 equal = (h0u == h1u && l0u == l1u);
3860 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3861 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3862 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3863 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3866 /* Otherwise, there are some code-specific tests we can make. */
3867 else
3869 /* Optimize comparisons with upper and lower bounds. */
3870 if (SCALAR_INT_MODE_P (mode)
3871 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3873 rtx mmin, mmax;
3874 int sign;
3876 if (code == GEU
3877 || code == LEU
3878 || code == GTU
3879 || code == LTU)
3880 sign = 0;
3881 else
3882 sign = 1;
3884 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3886 tem = NULL_RTX;
3887 switch (code)
3889 case GEU:
3890 case GE:
3891 /* x >= min is always true. */
3892 if (rtx_equal_p (trueop1, mmin))
3893 tem = const_true_rtx;
3894 else
3895 break;
3897 case LEU:
3898 case LE:
3899 /* x <= max is always true. */
3900 if (rtx_equal_p (trueop1, mmax))
3901 tem = const_true_rtx;
3902 break;
3904 case GTU:
3905 case GT:
3906 /* x > max is always false. */
3907 if (rtx_equal_p (trueop1, mmax))
3908 tem = const0_rtx;
3909 break;
3911 case LTU:
3912 case LT:
3913 /* x < min is always false. */
3914 if (rtx_equal_p (trueop1, mmin))
3915 tem = const0_rtx;
3916 break;
3918 default:
3919 break;
3921 if (tem == const0_rtx
3922 || tem == const_true_rtx)
3923 return tem;
3926 switch (code)
3928 case EQ:
3929 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3930 return const0_rtx;
3931 break;
3933 case NE:
3934 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3935 return const_true_rtx;
3936 break;
3938 case LT:
3939 /* Optimize abs(x) < 0.0. */
3940 if (trueop1 == CONST0_RTX (mode)
3941 && !HONOR_SNANS (mode)
3942 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3944 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3945 : trueop0;
3946 if (GET_CODE (tem) == ABS)
3947 return const0_rtx;
3949 break;
3951 case GE:
3952 /* Optimize abs(x) >= 0.0. */
3953 if (trueop1 == CONST0_RTX (mode)
3954 && !HONOR_NANS (mode)
3955 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3957 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3958 : trueop0;
3959 if (GET_CODE (tem) == ABS)
3960 return const_true_rtx;
3962 break;
3964 case UNGE:
3965 /* Optimize ! (abs(x) < 0.0). */
3966 if (trueop1 == CONST0_RTX (mode))
3968 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3969 : trueop0;
3970 if (GET_CODE (tem) == ABS)
3971 return const_true_rtx;
3973 break;
3975 default:
3976 break;
3979 return 0;
3982 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3983 as appropriate. */
3984 switch (code)
3986 case EQ:
3987 case UNEQ:
3988 return equal ? const_true_rtx : const0_rtx;
3989 case NE:
3990 case LTGT:
3991 return ! equal ? const_true_rtx : const0_rtx;
3992 case LT:
3993 case UNLT:
3994 return op0lt ? const_true_rtx : const0_rtx;
3995 case GT:
3996 case UNGT:
3997 return op1lt ? const_true_rtx : const0_rtx;
3998 case LTU:
3999 return op0ltu ? const_true_rtx : const0_rtx;
4000 case GTU:
4001 return op1ltu ? const_true_rtx : const0_rtx;
4002 case LE:
4003 case UNLE:
4004 return equal || op0lt ? const_true_rtx : const0_rtx;
4005 case GE:
4006 case UNGE:
4007 return equal || op1lt ? const_true_rtx : const0_rtx;
4008 case LEU:
4009 return equal || op0ltu ? const_true_rtx : const0_rtx;
4010 case GEU:
4011 return equal || op1ltu ? const_true_rtx : const0_rtx;
4012 case ORDERED:
4013 return const_true_rtx;
4014 case UNORDERED:
4015 return const0_rtx;
4016 default:
4017 gcc_unreachable ();
4021 /* Simplify CODE, an operation with result mode MODE and three operands,
4022 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4023 a constant. Return 0 if no simplification is possible. */
4026 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4027 enum machine_mode op0_mode, rtx op0, rtx op1,
4028 rtx op2)
4030 unsigned int width = GET_MODE_BITSIZE (mode);
4032 /* VOIDmode means "infinite" precision. */
4033 if (width == 0)
4034 width = HOST_BITS_PER_WIDE_INT;
4036 switch (code)
4038 case SIGN_EXTRACT:
4039 case ZERO_EXTRACT:
4040 if (GET_CODE (op0) == CONST_INT
4041 && GET_CODE (op1) == CONST_INT
4042 && GET_CODE (op2) == CONST_INT
4043 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4044 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4046 /* Extracting a bit-field from a constant */
4047 HOST_WIDE_INT val = INTVAL (op0);
4049 if (BITS_BIG_ENDIAN)
4050 val >>= (GET_MODE_BITSIZE (op0_mode)
4051 - INTVAL (op2) - INTVAL (op1));
4052 else
4053 val >>= INTVAL (op2);
4055 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4057 /* First zero-extend. */
4058 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4059 /* If desired, propagate sign bit. */
4060 if (code == SIGN_EXTRACT
4061 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4062 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4065 /* Clear the bits that don't belong in our mode,
4066 unless they and our sign bit are all one.
4067 So we get either a reasonable negative value or a reasonable
4068 unsigned value for this mode. */
4069 if (width < HOST_BITS_PER_WIDE_INT
4070 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4071 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4072 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4074 return gen_int_mode (val, mode);
4076 break;
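/* A minimal sketch (illustrative only, assuming little-endian bit numbering,
   i.e. BITS_BIG_ENDIAN == 0) of the constant bit-field extraction above:
   take LEN bits starting at bit POS of VAL, then propagate the sign bit
   when a signed extraction is wanted.

     static HOST_WIDE_INT
     extract_constant_field (HOST_WIDE_INT val, int pos, int len, int is_signed)
     {
       val >>= pos;
       if (len < HOST_BITS_PER_WIDE_INT)
         {
           val &= ((HOST_WIDE_INT) 1 << len) - 1;
           if (is_signed && (val & ((HOST_WIDE_INT) 1 << (len - 1))))
             val |= ~(((HOST_WIDE_INT) 1 << len) - 1);
         }
       return val;
     }  */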
4078 case IF_THEN_ELSE:
4079 if (GET_CODE (op0) == CONST_INT)
4080 return op0 != const0_rtx ? op1 : op2;
4082 /* Convert c ? a : a into "a". */
4083 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4084 return op1;
4086 /* Convert a != b ? a : b into "a". */
4087 if (GET_CODE (op0) == NE
4088 && ! side_effects_p (op0)
4089 && ! HONOR_NANS (mode)
4090 && ! HONOR_SIGNED_ZEROS (mode)
4091 && ((rtx_equal_p (XEXP (op0, 0), op1)
4092 && rtx_equal_p (XEXP (op0, 1), op2))
4093 || (rtx_equal_p (XEXP (op0, 0), op2)
4094 && rtx_equal_p (XEXP (op0, 1), op1))))
4095 return op1;
4097 /* Convert a == b ? a : b into "b". */
4098 if (GET_CODE (op0) == EQ
4099 && ! side_effects_p (op0)
4100 && ! HONOR_NANS (mode)
4101 && ! HONOR_SIGNED_ZEROS (mode)
4102 && ((rtx_equal_p (XEXP (op0, 0), op1)
4103 && rtx_equal_p (XEXP (op0, 1), op2))
4104 || (rtx_equal_p (XEXP (op0, 0), op2)
4105 && rtx_equal_p (XEXP (op0, 1), op1))))
4106 return op2;
4108 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4110 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4111 ? GET_MODE (XEXP (op0, 1))
4112 : GET_MODE (XEXP (op0, 0)));
4113 rtx temp;
4115 /* See whether constant values of op1 and op2 let us fold this into a plain comparison. */
4116 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4118 HOST_WIDE_INT t = INTVAL (op1);
4119 HOST_WIDE_INT f = INTVAL (op2);
4121 if (t == STORE_FLAG_VALUE && f == 0)
4122 code = GET_CODE (op0);
4123 else if (t == 0 && f == STORE_FLAG_VALUE)
4125 enum rtx_code tmp;
4126 tmp = reversed_comparison_code (op0, NULL_RTX);
4127 if (tmp == UNKNOWN)
4128 break;
4129 code = tmp;
4131 else
4132 break;
4134 return simplify_gen_relational (code, mode, cmp_mode,
4135 XEXP (op0, 0), XEXP (op0, 1));
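/* For example (assuming the target's STORE_FLAG_VALUE is 1):
   (if_then_else:SI (lt X Y) (const_int 1) (const_int 0)) simplifies to
   (lt:SI X Y), and with the two arms swapped the reversed comparison
   (ge:SI X Y) is used instead, provided the reversal is known to be safe.  */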
4138 if (cmp_mode == VOIDmode)
4139 cmp_mode = op0_mode;
4140 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4141 cmp_mode, XEXP (op0, 0),
4142 XEXP (op0, 1));
4144 /* See if any simplifications were possible. */
4145 if (temp)
4147 if (GET_CODE (temp) == CONST_INT)
4148 return temp == const0_rtx ? op2 : op1;
4149 else if (temp)
4150 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4153 break;
4155 case VEC_MERGE:
4156 gcc_assert (GET_MODE (op0) == mode);
4157 gcc_assert (GET_MODE (op1) == mode);
4158 gcc_assert (VECTOR_MODE_P (mode));
4159 op2 = avoid_constant_pool_reference (op2);
4160 if (GET_CODE (op2) == CONST_INT)
4162 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4163 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4164 int mask = (1 << n_elts) - 1;
4166 if (!(INTVAL (op2) & mask))
4167 return op1;
4168 if ((INTVAL (op2) & mask) == mask)
4169 return op0;
4171 op0 = avoid_constant_pool_reference (op0);
4172 op1 = avoid_constant_pool_reference (op1);
4173 if (GET_CODE (op0) == CONST_VECTOR
4174 && GET_CODE (op1) == CONST_VECTOR)
4176 rtvec v = rtvec_alloc (n_elts);
4177 unsigned int i;
4179 for (i = 0; i < n_elts; i++)
4180 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4181 ? CONST_VECTOR_ELT (op0, i)
4182 : CONST_VECTOR_ELT (op1, i));
4183 return gen_rtx_CONST_VECTOR (mode, v);
4186 break;
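/* A worked illustration: in V4SImode, a selector of (const_int 5)
   (binary 0101) picks elements 0 and 2 from OP0 and elements 1 and 3
   from OP1 when both are constant vectors; a selector of 0 yields OP1
   outright and a selector of 0xf yields OP0.  */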
4188 default:
4189 gcc_unreachable ();
4192 return 0;
4195 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4196 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4198 Works by unpacking OP into a collection of 8-bit values
4199 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4200 and then repacking them again for OUTERMODE. */
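/* For example (an illustrative case on a little-endian target): simplifying
   (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE unpacks the
   constant into the little-endian bytes {0x78, 0x56, 0x34, 0x12, ...},
   selects the two bytes at offset 0, and repacks them as (const_int 0x5678).  */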
4202 static rtx
4203 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4204 enum machine_mode innermode, unsigned int byte)
4206 /* We support up to 512-bit values (for V8DFmode). */
4207 enum {
4208 max_bitsize = 512,
4209 value_bit = 8,
4210 value_mask = (1 << value_bit) - 1
4212 unsigned char value[max_bitsize / value_bit];
4213 int value_start;
4214 int i;
4215 int elem;
4217 int num_elem;
4218 rtx * elems;
4219 int elem_bitsize;
4220 rtx result_s;
4221 rtvec result_v = NULL;
4222 enum mode_class outer_class;
4223 enum machine_mode outer_submode;
4225 /* Some ports misuse CCmode. */
4226 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4227 return op;
4229 /* We have no way to represent a complex constant at the rtl level. */
4230 if (COMPLEX_MODE_P (outermode))
4231 return NULL_RTX;
4233 /* Unpack the value. */
4235 if (GET_CODE (op) == CONST_VECTOR)
4237 num_elem = CONST_VECTOR_NUNITS (op);
4238 elems = &CONST_VECTOR_ELT (op, 0);
4239 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4241 else
4243 num_elem = 1;
4244 elems = &op;
4245 elem_bitsize = max_bitsize;
4247 /* If this asserts, it is too complicated; reducing value_bit may help. */
4248 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4249 /* I don't know how to handle endianness of sub-units. */
4250 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4252 for (elem = 0; elem < num_elem; elem++)
4254 unsigned char * vp;
4255 rtx el = elems[elem];
4257 /* Vectors are kept in target memory order. (This is probably
4258 a mistake.) */
4260 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4261 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4262 / BITS_PER_UNIT);
4263 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4264 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4265 unsigned bytele = (subword_byte % UNITS_PER_WORD
4266 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4267 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4270 switch (GET_CODE (el))
4272 case CONST_INT:
4273 for (i = 0;
4274 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4275 i += value_bit)
4276 *vp++ = INTVAL (el) >> i;
4277 /* CONST_INTs are always logically sign-extended. */
4278 for (; i < elem_bitsize; i += value_bit)
4279 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4280 break;
4282 case CONST_DOUBLE:
4283 if (GET_MODE (el) == VOIDmode)
4285 /* If this triggers, someone should have generated a
4286 CONST_INT instead. */
4287 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4289 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4290 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4291 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4293 *vp++
4294 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4295 i += value_bit;
4297 /* It shouldn't matter what's done here, so fill it with
4298 zero. */
4299 for (; i < elem_bitsize; i += value_bit)
4300 *vp++ = 0;
4302 else
4304 long tmp[max_bitsize / 32];
4305 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4307 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4308 gcc_assert (bitsize <= elem_bitsize);
4309 gcc_assert (bitsize % value_bit == 0);
4311 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4312 GET_MODE (el));
4314 /* real_to_target produces its result in words affected by
4315 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4316 and use WORDS_BIG_ENDIAN instead; see the documentation
4317 of SUBREG in rtl.texi. */
4318 for (i = 0; i < bitsize; i += value_bit)
4320 int ibase;
4321 if (WORDS_BIG_ENDIAN)
4322 ibase = bitsize - 1 - i;
4323 else
4324 ibase = i;
4325 *vp++ = tmp[ibase / 32] >> i % 32;
4328 /* It shouldn't matter what's done here, so fill it with
4329 zero. */
4330 for (; i < elem_bitsize; i += value_bit)
4331 *vp++ = 0;
4333 break;
4335 default:
4336 gcc_unreachable ();
4340 /* Now, pick the right byte to start with. */
4341 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4342 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4343 will already have offset 0. */
4344 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4346 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4347 - byte);
4348 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4349 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4350 byte = (subword_byte % UNITS_PER_WORD
4351 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4354 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4355 so if it's become negative it will instead be very large.) */
4356 gcc_assert (byte < GET_MODE_SIZE (innermode));
4358 /* Convert from bytes to chunks of size value_bit. */
4359 value_start = byte * (BITS_PER_UNIT / value_bit);
4361 /* Re-pack the value. */
4363 if (VECTOR_MODE_P (outermode))
4365 num_elem = GET_MODE_NUNITS (outermode);
4366 result_v = rtvec_alloc (num_elem);
4367 elems = &RTVEC_ELT (result_v, 0);
4368 outer_submode = GET_MODE_INNER (outermode);
4370 else
4372 num_elem = 1;
4373 elems = &result_s;
4374 outer_submode = outermode;
4377 outer_class = GET_MODE_CLASS (outer_submode);
4378 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4380 gcc_assert (elem_bitsize % value_bit == 0);
4381 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4383 for (elem = 0; elem < num_elem; elem++)
4385 unsigned char *vp;
4387 /* Vectors are stored in target memory order. (This is probably
4388 a mistake.) */
4390 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4391 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4392 / BITS_PER_UNIT);
4393 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4394 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4395 unsigned bytele = (subword_byte % UNITS_PER_WORD
4396 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4397 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4400 switch (outer_class)
4402 case MODE_INT:
4403 case MODE_PARTIAL_INT:
4405 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4407 for (i = 0;
4408 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4409 i += value_bit)
4410 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4411 for (; i < elem_bitsize; i += value_bit)
4412 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4413 << (i - HOST_BITS_PER_WIDE_INT));
4415 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4416 know why. */
4417 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4418 elems[elem] = gen_int_mode (lo, outer_submode);
4419 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4420 elems[elem] = immed_double_const (lo, hi, outer_submode);
4421 else
4422 return NULL_RTX;
4424 break;
4426 case MODE_FLOAT:
4427 case MODE_DECIMAL_FLOAT:
4429 REAL_VALUE_TYPE r;
4430 long tmp[max_bitsize / 32];
4432 /* real_from_target wants its input in words affected by
4433 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4434 and use WORDS_BIG_ENDIAN instead; see the documentation
4435 of SUBREG in rtl.texi. */
4436 for (i = 0; i < max_bitsize / 32; i++)
4437 tmp[i] = 0;
4438 for (i = 0; i < elem_bitsize; i += value_bit)
4440 int ibase;
4441 if (WORDS_BIG_ENDIAN)
4442 ibase = elem_bitsize - 1 - i;
4443 else
4444 ibase = i;
4445 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4448 real_from_target (&r, tmp, outer_submode);
4449 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4451 break;
4453 default:
4454 gcc_unreachable ();
4457 if (VECTOR_MODE_P (outermode))
4458 return gen_rtx_CONST_VECTOR (outermode, result_v);
4459 else
4460 return result_s;
4463 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4464 Return 0 if no simplifications are possible. */
4466 simplify_subreg (enum machine_mode outermode, rtx op,
4467 enum machine_mode innermode, unsigned int byte)
4469 /* Little bit of sanity checking. */
4470 gcc_assert (innermode != VOIDmode);
4471 gcc_assert (outermode != VOIDmode);
4472 gcc_assert (innermode != BLKmode);
4473 gcc_assert (outermode != BLKmode);
4475 gcc_assert (GET_MODE (op) == innermode
4476 || GET_MODE (op) == VOIDmode);
4478 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4479 gcc_assert (byte < GET_MODE_SIZE (innermode));
4481 if (outermode == innermode && !byte)
4482 return op;
4484 if (GET_CODE (op) == CONST_INT
4485 || GET_CODE (op) == CONST_DOUBLE
4486 || GET_CODE (op) == CONST_VECTOR)
4487 return simplify_immed_subreg (outermode, op, innermode, byte);
4489 /* Changing mode twice with SUBREG => just change it once,
4490 or not at all if changing back to OP's starting mode. */
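/* For example, on a little-endian target
   (subreg:QI (subreg:HI (reg:SI R) 0) 0) folds to (subreg:QI (reg:SI R) 0),
   and a SUBREG that merely restores the innermost register's own mode
   (with both offsets zero) folds away entirely.  */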
4491 if (GET_CODE (op) == SUBREG)
4493 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4494 int final_offset = byte + SUBREG_BYTE (op);
4495 rtx newx;
4497 if (outermode == innermostmode
4498 && byte == 0 && SUBREG_BYTE (op) == 0)
4499 return SUBREG_REG (op);
4501 /* The SUBREG_BYTE represents the offset, as if the value were stored
4502 in memory. An irritating exception is the paradoxical subreg, where
4503 we define SUBREG_BYTE to be 0; on big-endian machines this value
4504 would otherwise be negative. For a moment, undo this exception. */
4505 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4507 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4508 if (WORDS_BIG_ENDIAN)
4509 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4510 if (BYTES_BIG_ENDIAN)
4511 final_offset += difference % UNITS_PER_WORD;
4513 if (SUBREG_BYTE (op) == 0
4514 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4516 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4517 if (WORDS_BIG_ENDIAN)
4518 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4519 if (BYTES_BIG_ENDIAN)
4520 final_offset += difference % UNITS_PER_WORD;
4523 /* See whether resulting subreg will be paradoxical. */
4524 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4526 /* In nonparadoxical subregs we can't handle negative offsets. */
4527 if (final_offset < 0)
4528 return NULL_RTX;
4529 /* Bail out in case resulting subreg would be incorrect. */
4530 if (final_offset % GET_MODE_SIZE (outermode)
4531 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4532 return NULL_RTX;
4534 else
4536 int offset = 0;
4537 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4539 /* In a paradoxical subreg, see if we are still looking at the lower part.
4540 If so, our SUBREG_BYTE will be 0. */
4541 if (WORDS_BIG_ENDIAN)
4542 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4543 if (BYTES_BIG_ENDIAN)
4544 offset += difference % UNITS_PER_WORD;
4545 if (offset == final_offset)
4546 final_offset = 0;
4547 else
4548 return NULL_RTX;
4551 /* Recurse for further possible simplifications. */
4552 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4553 final_offset);
4554 if (newx)
4555 return newx;
4556 if (validate_subreg (outermode, innermostmode,
4557 SUBREG_REG (op), final_offset))
4558 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4559 return NULL_RTX;
4562 /* Merge implicit and explicit truncations. */
4564 if (GET_CODE (op) == TRUNCATE
4565 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4566 && subreg_lowpart_offset (outermode, innermode) == byte)
4567 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4568 GET_MODE (XEXP (op, 0)));
4570 /* SUBREG of a hard register => just change the register number
4571 and/or mode. If the hard register is not valid in that mode,
4572 suppress this simplification. If the hard register is the stack,
4573 frame, or argument pointer, leave this as a SUBREG. */
4575 if (REG_P (op)
4576 && REGNO (op) < FIRST_PSEUDO_REGISTER
4577 #ifdef CANNOT_CHANGE_MODE_CLASS
4578 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4579 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4580 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4581 #endif
4582 && ((reload_completed && !frame_pointer_needed)
4583 || (REGNO (op) != FRAME_POINTER_REGNUM
4584 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4585 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4586 #endif
4588 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4589 && REGNO (op) != ARG_POINTER_REGNUM
4590 #endif
4591 && REGNO (op) != STACK_POINTER_REGNUM
4592 && subreg_offset_representable_p (REGNO (op), innermode,
4593 byte, outermode))
4595 unsigned int regno = REGNO (op);
4596 unsigned int final_regno
4597 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4599 /* ??? We do allow it if the current REG is not valid for
4600 its mode. This is a kludge to work around how float/complex
4601 arguments are passed on 32-bit SPARC and should be fixed. */
4602 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4603 || ! HARD_REGNO_MODE_OK (regno, innermode))
4605 rtx x;
4606 int final_offset = byte;
4608 /* Adjust offset for paradoxical subregs. */
4609 if (byte == 0
4610 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4612 int difference = (GET_MODE_SIZE (innermode)
4613 - GET_MODE_SIZE (outermode));
4614 if (WORDS_BIG_ENDIAN)
4615 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4616 if (BYTES_BIG_ENDIAN)
4617 final_offset += difference % UNITS_PER_WORD;
4620 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4622 /* Propagate the original regno. We don't have any way to specify
4623 an offset inside the original regno, so do so only for the lowpart.
4624 The information is used only by alias analysis, which cannot
4625 handle partial registers anyway. */
4627 if (subreg_lowpart_offset (outermode, innermode) == byte)
4628 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4629 return x;
4633 /* If we have a SUBREG of a register that we are replacing and we are
4634 replacing it with a MEM, make a new MEM and try replacing the
4635 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4636 or if we would be widening it. */
4638 if (MEM_P (op)
4639 && ! mode_dependent_address_p (XEXP (op, 0))
4640 /* Allow splitting of volatile memory references in case we don't
4641 have an instruction to move the whole thing. */
4642 && (! MEM_VOLATILE_P (op)
4643 || ! have_insn_for (SET, innermode))
4644 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4645 return adjust_address_nv (op, outermode, byte);
4647 /* Handle complex values represented as CONCAT
4648 of real and imaginary part. */
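/* For instance, with SFmode parts,
   (subreg:SF (concat:SC X Y) 0) simplifies to X and
   (subreg:SF (concat:SC X Y) 4) simplifies to Y.  */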
4649 if (GET_CODE (op) == CONCAT)
4651 unsigned int part_size, final_offset;
4652 rtx part, res;
4654 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4655 if (byte < part_size)
4657 part = XEXP (op, 0);
4658 final_offset = byte;
4660 else
4662 part = XEXP (op, 1);
4663 final_offset = byte - part_size;
4666 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4667 return NULL_RTX;
4669 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4670 if (res)
4671 return res;
4672 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4673 return gen_rtx_SUBREG (outermode, part, final_offset);
4674 return NULL_RTX;
4677 /* Optimize SUBREG truncations of zero and sign extended values. */
4678 if ((GET_CODE (op) == ZERO_EXTEND
4679 || GET_CODE (op) == SIGN_EXTEND)
4680 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4682 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4684 /* If we're requesting the lowpart of a zero or sign extension,
4685 there are three possibilities. If the outermode is the same
4686 as the origmode, we can omit both the extension and the subreg.
4687 If the outermode is not larger than the origmode, we can apply
4688 the truncation without the extension. Finally, if the outermode
4689 is larger than the origmode, but both are integer modes, we
4690 can just extend to the appropriate mode. */
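/* Concretely (lowpart offsets shown for a little-endian target):
   (subreg:QI (zero_extend:SI (reg:QI X)) 0) is just (reg:QI X);
   (subreg:QI (zero_extend:SI (reg:HI X)) 0) becomes (subreg:QI (reg:HI X) 0);
   and (subreg:HI (zero_extend:SI (reg:QI X)) 0) becomes
   (zero_extend:HI (reg:QI X)).  */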
4691 if (bitpos == 0)
4693 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4694 if (outermode == origmode)
4695 return XEXP (op, 0);
4696 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4697 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4698 subreg_lowpart_offset (outermode,
4699 origmode));
4700 if (SCALAR_INT_MODE_P (outermode))
4701 return simplify_gen_unary (GET_CODE (op), outermode,
4702 XEXP (op, 0), origmode);
4705 /* A SUBREG resulting from a zero extension may fold to zero if
4706 it extracts higher bits than the ZERO_EXTEND's source provides. */
4707 if (GET_CODE (op) == ZERO_EXTEND
4708 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4709 return CONST0_RTX (outermode);
4712 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4713 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4714 the outer subreg is effectively a truncation to the original mode. */
4715 if ((GET_CODE (op) == LSHIFTRT
4716 || GET_CODE (op) == ASHIFTRT)
4717 && SCALAR_INT_MODE_P (outermode)
4718 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
4719 to avoid the possibility that an outer LSHIFTRT shifts by more
4720 than the sign extension's sign_bit_copies and introduces zeros
4721 into the high bits of the result. */
4722 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4723 && GET_CODE (XEXP (op, 1)) == CONST_INT
4724 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4725 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4726 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4727 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4728 return simplify_gen_binary (ASHIFTRT, outermode,
4729 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4731 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4732 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4733 the outer subreg is effectively a truncation to the original mode. */
4734 if ((GET_CODE (op) == LSHIFTRT
4735 || GET_CODE (op) == ASHIFTRT)
4736 && SCALAR_INT_MODE_P (outermode)
4737 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4738 && GET_CODE (XEXP (op, 1)) == CONST_INT
4739 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4740 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4741 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4742 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4743 return simplify_gen_binary (LSHIFTRT, outermode,
4744 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4746 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4747 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4748 the outer subreg is effectively a truncation to the original mode. */
4749 if (GET_CODE (op) == ASHIFT
4750 && SCALAR_INT_MODE_P (outermode)
4751 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4752 && GET_CODE (XEXP (op, 1)) == CONST_INT
4753 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4754 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4755 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4756 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4757 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4758 return simplify_gen_binary (ASHIFT, outermode,
4759 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4761 return NULL_RTX;
4764 /* Make a SUBREG operation or equivalent if it folds. */
4767 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4768 enum machine_mode innermode, unsigned int byte)
4770 rtx newx;
4772 newx = simplify_subreg (outermode, op, innermode, byte);
4773 if (newx)
4774 return newx;
4776 if (GET_CODE (op) == SUBREG
4777 || GET_CODE (op) == CONCAT
4778 || GET_MODE (op) == VOIDmode)
4779 return NULL_RTX;
4781 if (validate_subreg (outermode, innermode, op, byte))
4782 return gen_rtx_SUBREG (outermode, op, byte);
4784 return NULL_RTX;
4787 /* Simplify X, an rtx expression.
4789 Return the simplified expression or NULL if no simplifications
4790 were possible.
4792 This is the preferred entry point into the simplification routines;
4793 however, we still allow passes to call the more specific routines.
4795 Right now GCC has three (yes, three) major bodies of RTL simplification
4796 code that need to be unified.
4798 1. fold_rtx in cse.c. This code uses various CSE specific
4799 information to aid in RTL simplification.
4801 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4802 it uses combine specific information to aid in RTL
4803 simplification.
4805 3. The routines in this file.
4808 Long term we want to only have one body of simplification code; to
4809 get to that state I recommend the following steps:
4811 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4812 that do not depend on pass-specific state into these routines.
4814 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4815 use this routine whenever possible.
4817 3. Allow for pass dependent state to be provided to these
4818 routines and add simplifications based on the pass dependent
4819 state. Remove code from cse.c & combine.c that becomes
4820 redundant/dead.
4822 It will take time, but ultimately the compiler will be easier to
4823 maintain and improve. It's totally silly that when we add a
4824 simplification it needs to be added to 4 places (3 for RTL
4825 simplification and 1 for tree simplification). */
4828 simplify_rtx (rtx x)
4830 enum rtx_code code = GET_CODE (x);
4831 enum machine_mode mode = GET_MODE (x);
4833 switch (GET_RTX_CLASS (code))
4835 case RTX_UNARY:
4836 return simplify_unary_operation (code, mode,
4837 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4838 case RTX_COMM_ARITH:
4839 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4840 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4842 /* Fall through.... */
4844 case RTX_BIN_ARITH:
4845 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4847 case RTX_TERNARY:
4848 case RTX_BITFIELD_OPS:
4849 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4850 XEXP (x, 0), XEXP (x, 1),
4851 XEXP (x, 2));
4853 case RTX_COMPARE:
4854 case RTX_COMM_COMPARE:
4855 return simplify_relational_operation (code, mode,
4856 ((GET_MODE (XEXP (x, 0))
4857 != VOIDmode)
4858 ? GET_MODE (XEXP (x, 0))
4859 : GET_MODE (XEXP (x, 1))),
4860 XEXP (x, 0),
4861 XEXP (x, 1));
4863 case RTX_EXTRA:
4864 if (code == SUBREG)
4865 return simplify_subreg (mode, SUBREG_REG (x),
4866 GET_MODE (SUBREG_REG (x)),
4867 SUBREG_BYTE (x));
4868 break;
4870 case RTX_OBJ:
4871 if (code == LO_SUM)
4873 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4874 if (GET_CODE (XEXP (x, 0)) == HIGH
4875 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4876 return XEXP (x, 1);
4878 break;
4880 default:
4881 break;
4883 return NULL;