* simplify-rtx.c (simplify_relational_operation_1): Implement some
[official-gcc.git] / gcc / simplify-rtx.c
blobf04f0521e35a3b65bd1ba39d82a684c086d59ff5
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
134 avoid_constant_pool_reference (rtx x)
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
142 case MEM:
143 break;
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
151 REAL_VALUE_TYPE d;
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
156 return x;
158 default:
159 return x;
162 if (GET_MODE (x) == BLKmode)
163 return x;
165 addr = XEXP (x, 0);
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
199 else
200 return c;
203 return x;
206 /* Return true if X is a MEM referencing the constant pool. */
208 bool
209 constant_pool_reference_p (rtx x)
211 return avoid_constant_pool_reference (x) != x;
214 /* Make a unary operation by first seeing if it folds and otherwise making
215 the specified operation. */
218 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
219 enum machine_mode op_mode)
221 rtx tem;
223 /* If this simplifies, use it. */
224 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
225 return tem;
227 return gen_rtx_fmt_e (code, mode, op);
230 /* Likewise for ternary operations. */
233 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
234 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
236 rtx tem;
238 /* If this simplifies, use it. */
239 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
240 op0, op1, op2)))
241 return tem;
243 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
246 /* Likewise, for relational operations.
247 CMP_MODE specifies mode comparison is done in. */
250 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
251 enum machine_mode cmp_mode, rtx op0, rtx op1)
253 rtx tem;
255 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
256 op0, op1)))
257 return tem;
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
266 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
271 rtx op0, op1, op2;
273 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
277 if (x == old_rtx)
278 return new_rtx;
280 switch (GET_RTX_CLASS (code))
282 case RTX_UNARY:
283 op0 = XEXP (x, 0);
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0))
287 return x;
288 return simplify_gen_unary (code, mode, op0, op_mode);
290 case RTX_BIN_ARITH:
291 case RTX_COMM_ARITH:
292 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
295 return x;
296 return simplify_gen_binary (code, mode, op0, op1);
298 case RTX_COMPARE:
299 case RTX_COMM_COMPARE:
300 op0 = XEXP (x, 0);
301 op1 = XEXP (x, 1);
302 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
303 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
304 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
305 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
306 return x;
307 return simplify_gen_relational (code, mode, op_mode, op0, op1);
309 case RTX_TERNARY:
310 case RTX_BITFIELD_OPS:
311 op0 = XEXP (x, 0);
312 op_mode = GET_MODE (op0);
313 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
314 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
315 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
316 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
317 return x;
318 if (op_mode == VOIDmode)
319 op_mode = GET_MODE (op0);
320 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
322 case RTX_EXTRA:
323 /* The only case we try to handle is a SUBREG. */
324 if (code == SUBREG)
326 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
327 if (op0 == SUBREG_REG (x))
328 return x;
329 op0 = simplify_gen_subreg (GET_MODE (x), op0,
330 GET_MODE (SUBREG_REG (x)),
331 SUBREG_BYTE (x));
332 return op0 ? op0 : x;
334 break;
336 case RTX_OBJ:
337 if (code == MEM)
339 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
340 if (op0 == XEXP (x, 0))
341 return x;
342 return replace_equiv_address_nv (x, op0);
344 else if (code == LO_SUM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
347 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
349 /* (lo_sum (high x) x) -> x */
350 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
351 return op1;
353 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
354 return x;
355 return gen_rtx_LO_SUM (mode, op0, op1);
357 else if (code == REG)
359 if (rtx_equal_p (x, old_rtx))
360 return new_rtx;
362 break;
364 default:
365 break;
367 return x;
370 /* Try to simplify a unary operation CODE whose output mode is to be
371 MODE with input operand OP whose mode was originally OP_MODE.
372 Return zero if no simplification can be made. */
374 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
375 rtx op, enum machine_mode op_mode)
377 rtx trueop, tem;
379 if (GET_CODE (op) == CONST)
380 op = XEXP (op, 0);
382 trueop = avoid_constant_pool_reference (op);
384 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
385 if (tem)
386 return tem;
388 return simplify_unary_operation_1 (code, mode, op);
391 /* Perform some simplifications we can do even if the operands
392 aren't constant. */
393 static rtx
394 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
396 enum rtx_code reversed;
397 rtx temp;
399 switch (code)
401 case NOT:
402 /* (not (not X)) == X. */
403 if (GET_CODE (op) == NOT)
404 return XEXP (op, 0);
406 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
407 comparison is all ones. */
408 if (COMPARISON_P (op)
409 && (mode == BImode || STORE_FLAG_VALUE == -1)
410 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
411 return simplify_gen_relational (reversed, mode, VOIDmode,
412 XEXP (op, 0), XEXP (op, 1));
414 /* (not (plus X -1)) can become (neg X). */
415 if (GET_CODE (op) == PLUS
416 && XEXP (op, 1) == constm1_rtx)
417 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
419 /* Similarly, (not (neg X)) is (plus X -1). */
420 if (GET_CODE (op) == NEG)
421 return plus_constant (XEXP (op, 0), -1);
423 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
424 if (GET_CODE (op) == XOR
425 && GET_CODE (XEXP (op, 1)) == CONST_INT
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
431 if (GET_CODE (op) == PLUS
432 && GET_CODE (XEXP (op, 1)) == CONST_INT
433 && mode_signbit_p (mode, XEXP (op, 1))
434 && (temp = simplify_unary_operation (NOT, mode,
435 XEXP (op, 1), mode)) != 0)
436 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
439 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
440 operands other than 1, but that is not valid. We could do a
441 similar simplification for (not (lshiftrt C X)) where C is
442 just the sign bit, but this doesn't seem common enough to
443 bother with. */
444 if (GET_CODE (op) == ASHIFT
445 && XEXP (op, 0) == const1_rtx)
447 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
448 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
451 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
452 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
453 so we can perform the above simplification. */
455 if (STORE_FLAG_VALUE == -1
456 && GET_CODE (op) == ASHIFTRT
457 && GET_CODE (XEXP (op, 1)) == CONST_INT
458 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
459 return simplify_gen_relational (GE, mode, VOIDmode,
460 XEXP (op, 0), const0_rtx);
463 if (GET_CODE (op) == SUBREG
464 && subreg_lowpart_p (op)
465 && (GET_MODE_SIZE (GET_MODE (op))
466 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
467 && GET_CODE (SUBREG_REG (op)) == ASHIFT
468 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
470 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
471 rtx x;
473 x = gen_rtx_ROTATE (inner_mode,
474 simplify_gen_unary (NOT, inner_mode, const1_rtx,
475 inner_mode),
476 XEXP (SUBREG_REG (op), 1));
477 return rtl_hooks.gen_lowpart_no_emit (mode, x);
480 /* Apply De Morgan's laws to reduce number of patterns for machines
481 with negating logical insns (and-not, nand, etc.). If result has
482 only one NOT, put it first, since that is how the patterns are
483 coded. */
485 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
487 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
488 enum machine_mode op_mode;
490 op_mode = GET_MODE (in1);
491 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
493 op_mode = GET_MODE (in2);
494 if (op_mode == VOIDmode)
495 op_mode = mode;
496 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
498 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
500 rtx tem = in2;
501 in2 = in1; in1 = tem;
504 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
505 mode, in1, in2);
507 break;
509 case NEG:
510 /* (neg (neg X)) == X. */
511 if (GET_CODE (op) == NEG)
512 return XEXP (op, 0);
514 /* (neg (plus X 1)) can become (not X). */
515 if (GET_CODE (op) == PLUS
516 && XEXP (op, 1) == const1_rtx)
517 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
519 /* Similarly, (neg (not X)) is (plus X 1). */
520 if (GET_CODE (op) == NOT)
521 return plus_constant (XEXP (op, 0), 1);
523 /* (neg (minus X Y)) can become (minus Y X). This transformation
524 isn't safe for modes with signed zeros, since if X and Y are
525 both +0, (minus Y X) is the same as (minus X Y). If the
526 rounding mode is towards +infinity (or -infinity) then the two
527 expressions will be rounded differently. */
528 if (GET_CODE (op) == MINUS
529 && !HONOR_SIGNED_ZEROS (mode)
530 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
531 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
533 if (GET_CODE (op) == PLUS
534 && !HONOR_SIGNED_ZEROS (mode)
535 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
537 /* (neg (plus A C)) is simplified to (minus -C A). */
538 if (GET_CODE (XEXP (op, 1)) == CONST_INT
539 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
541 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
542 if (temp)
543 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
546 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
551 /* (neg (mult A B)) becomes (mult (neg A) B).
552 This works even for floating-point values. */
553 if (GET_CODE (op) == MULT
554 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
556 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
560 /* NEG commutes with ASHIFT since it is multiplication. Only do
561 this if we can then eliminate the NEG (e.g., if the operand
562 is a constant). */
563 if (GET_CODE (op) == ASHIFT)
565 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
566 if (temp)
567 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
570 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == ASHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (LSHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
578 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
579 C is equal to the width of MODE minus 1. */
580 if (GET_CODE (op) == LSHIFTRT
581 && GET_CODE (XEXP (op, 1)) == CONST_INT
582 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
583 return simplify_gen_binary (ASHIFTRT, mode,
584 XEXP (op, 0), XEXP (op, 1));
586 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
587 if (GET_CODE (op) == XOR
588 && XEXP (op, 1) == const1_rtx
589 && nonzero_bits (XEXP (op, 0), mode) == 1)
590 return plus_constant (XEXP (op, 0), -1);
592 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
593 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
594 if (GET_CODE (op) == LT
595 && XEXP (op, 1) == const0_rtx)
597 enum machine_mode inner = GET_MODE (XEXP (op, 0));
598 int isize = GET_MODE_BITSIZE (inner);
599 if (STORE_FLAG_VALUE == 1)
601 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
602 GEN_INT (isize - 1));
603 if (mode == inner)
604 return temp;
605 if (GET_MODE_BITSIZE (mode) > isize)
606 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
607 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
609 else if (STORE_FLAG_VALUE == -1)
611 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
612 GEN_INT (isize - 1));
613 if (mode == inner)
614 return temp;
615 if (GET_MODE_BITSIZE (mode) > isize)
616 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
617 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
620 break;
622 case TRUNCATE:
623 /* We can't handle truncation to a partial integer mode here
624 because we don't know the real bitsize of the partial
625 integer mode. */
626 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
627 break;
629 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
630 if ((GET_CODE (op) == SIGN_EXTEND
631 || GET_CODE (op) == ZERO_EXTEND)
632 && GET_MODE (XEXP (op, 0)) == mode)
633 return XEXP (op, 0);
635 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
636 (OP:SI foo:SI) if OP is NEG or ABS. */
637 if ((GET_CODE (op) == ABS
638 || GET_CODE (op) == NEG)
639 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
640 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
641 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
642 return simplify_gen_unary (GET_CODE (op), mode,
643 XEXP (XEXP (op, 0), 0), mode);
645 /* (truncate:A (subreg:B (truncate:C X) 0)) is
646 (truncate:A X). */
647 if (GET_CODE (op) == SUBREG
648 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
649 && subreg_lowpart_p (op))
650 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
651 GET_MODE (XEXP (SUBREG_REG (op), 0)));
653 /* If we know that the value is already truncated, we can
654 replace the TRUNCATE with a SUBREG. Note that this is also
655 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
656 modes we just have to apply a different definition for
657 truncation. But don't do this for an (LSHIFTRT (MULT ...))
658 since this will cause problems with the umulXi3_highpart
659 patterns. */
660 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
661 GET_MODE_BITSIZE (GET_MODE (op)))
662 ? (num_sign_bit_copies (op, GET_MODE (op))
663 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
664 - GET_MODE_BITSIZE (mode)))
665 : truncated_to_mode (mode, op))
666 && ! (GET_CODE (op) == LSHIFTRT
667 && GET_CODE (XEXP (op, 0)) == MULT))
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 /* A truncate of a comparison can be replaced with a subreg if
671 STORE_FLAG_VALUE permits. This is like the previous test,
672 but it works even if the comparison is done in a mode larger
673 than HOST_BITS_PER_WIDE_INT. */
674 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
675 && COMPARISON_P (op)
676 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
677 return rtl_hooks.gen_lowpart_no_emit (mode, op);
678 break;
680 case FLOAT_TRUNCATE:
681 if (DECIMAL_FLOAT_MODE_P (mode))
682 break;
684 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
685 if (GET_CODE (op) == FLOAT_EXTEND
686 && GET_MODE (XEXP (op, 0)) == mode)
687 return XEXP (op, 0);
689 /* (float_truncate:SF (float_truncate:DF foo:XF))
690 = (float_truncate:SF foo:XF).
691 This may eliminate double rounding, so it is unsafe.
693 (float_truncate:SF (float_extend:XF foo:DF))
694 = (float_truncate:SF foo:DF).
696 (float_truncate:DF (float_extend:XF foo:SF))
697 = (float_extend:SF foo:DF). */
698 if ((GET_CODE (op) == FLOAT_TRUNCATE
699 && flag_unsafe_math_optimizations)
700 || GET_CODE (op) == FLOAT_EXTEND)
701 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
702 0)))
703 > GET_MODE_SIZE (mode)
704 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
705 mode,
706 XEXP (op, 0), mode);
708 /* (float_truncate (float x)) is (float x) */
709 if (GET_CODE (op) == FLOAT
710 && (flag_unsafe_math_optimizations
711 || ((unsigned)significand_size (GET_MODE (op))
712 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
713 - num_sign_bit_copies (XEXP (op, 0),
714 GET_MODE (XEXP (op, 0)))))))
715 return simplify_gen_unary (FLOAT, mode,
716 XEXP (op, 0),
717 GET_MODE (XEXP (op, 0)));
719 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
720 (OP:SF foo:SF) if OP is NEG or ABS. */
721 if ((GET_CODE (op) == ABS
722 || GET_CODE (op) == NEG)
723 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
725 return simplify_gen_unary (GET_CODE (op), mode,
726 XEXP (XEXP (op, 0), 0), mode);
728 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
729 is (float_truncate:SF x). */
730 if (GET_CODE (op) == SUBREG
731 && subreg_lowpart_p (op)
732 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
733 return SUBREG_REG (op);
734 break;
736 case FLOAT_EXTEND:
737 if (DECIMAL_FLOAT_MODE_P (mode))
738 break;
740 /* (float_extend (float_extend x)) is (float_extend x)
742 (float_extend (float x)) is (float x) assuming that double
743 rounding can't happen.
745 if (GET_CODE (op) == FLOAT_EXTEND
746 || (GET_CODE (op) == FLOAT
747 && ((unsigned)significand_size (GET_MODE (op))
748 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
749 - num_sign_bit_copies (XEXP (op, 0),
750 GET_MODE (XEXP (op, 0)))))))
751 return simplify_gen_unary (GET_CODE (op), mode,
752 XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
755 break;
757 case ABS:
758 /* (abs (neg <foo>)) -> (abs <foo>) */
759 if (GET_CODE (op) == NEG)
760 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
761 GET_MODE (XEXP (op, 0)));
763 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
764 do nothing. */
765 if (GET_MODE (op) == VOIDmode)
766 break;
768 /* If operand is something known to be positive, ignore the ABS. */
769 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
770 || ((GET_MODE_BITSIZE (GET_MODE (op))
771 <= HOST_BITS_PER_WIDE_INT)
772 && ((nonzero_bits (op, GET_MODE (op))
773 & ((HOST_WIDE_INT) 1
774 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
775 == 0)))
776 return op;
778 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
779 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
780 return gen_rtx_NEG (mode, op);
782 break;
784 case FFS:
785 /* (ffs (*_extend <X>)) = (ffs <X>) */
786 if (GET_CODE (op) == SIGN_EXTEND
787 || GET_CODE (op) == ZERO_EXTEND)
788 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
789 GET_MODE (XEXP (op, 0)));
790 break;
792 case POPCOUNT:
793 case PARITY:
794 /* (pop* (zero_extend <X>)) = (pop* <X>) */
795 if (GET_CODE (op) == ZERO_EXTEND)
796 return simplify_gen_unary (code, mode, XEXP (op, 0),
797 GET_MODE (XEXP (op, 0)));
798 break;
800 case FLOAT:
801 /* (float (sign_extend <X>)) = (float <X>). */
802 if (GET_CODE (op) == SIGN_EXTEND)
803 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
804 GET_MODE (XEXP (op, 0)));
805 break;
807 case SIGN_EXTEND:
808 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
809 becomes just the MINUS if its mode is MODE. This allows
810 folding switch statements on machines using casesi (such as
811 the VAX). */
812 if (GET_CODE (op) == TRUNCATE
813 && GET_MODE (XEXP (op, 0)) == mode
814 && GET_CODE (XEXP (op, 0)) == MINUS
815 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
816 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
817 return XEXP (op, 0);
819 /* Check for a sign extension of a subreg of a promoted
820 variable, where the promotion is sign-extended, and the
821 target mode is the same as the variable's promotion. */
822 if (GET_CODE (op) == SUBREG
823 && SUBREG_PROMOTED_VAR_P (op)
824 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
829 if (! POINTERS_EXTEND_UNSIGNED
830 && mode == Pmode && GET_MODE (op) == ptr_mode
831 && (CONSTANT_P (op)
832 || (GET_CODE (op) == SUBREG
833 && REG_P (SUBREG_REG (op))
834 && REG_POINTER (SUBREG_REG (op))
835 && GET_MODE (SUBREG_REG (op)) == Pmode)))
836 return convert_memory_address (Pmode, op);
837 #endif
838 break;
840 case ZERO_EXTEND:
841 /* Check for a zero extension of a subreg of a promoted
842 variable, where the promotion is zero-extended, and the
843 target mode is the same as the variable's promotion. */
844 if (GET_CODE (op) == SUBREG
845 && SUBREG_PROMOTED_VAR_P (op)
846 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
847 && GET_MODE (XEXP (op, 0)) == mode)
848 return XEXP (op, 0);
850 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
851 if (POINTERS_EXTEND_UNSIGNED > 0
852 && mode == Pmode && GET_MODE (op) == ptr_mode
853 && (CONSTANT_P (op)
854 || (GET_CODE (op) == SUBREG
855 && REG_P (SUBREG_REG (op))
856 && REG_POINTER (SUBREG_REG (op))
857 && GET_MODE (SUBREG_REG (op)) == Pmode)))
858 return convert_memory_address (Pmode, op);
859 #endif
860 break;
862 default:
863 break;
866 return 0;
869 /* Try to compute the value of a unary operation CODE whose output mode is to
870 be MODE with input operand OP whose mode was originally OP_MODE.
871 Return zero if the value cannot be computed. */
873 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
874 rtx op, enum machine_mode op_mode)
876 unsigned int width = GET_MODE_BITSIZE (mode);
878 if (code == VEC_DUPLICATE)
880 gcc_assert (VECTOR_MODE_P (mode));
881 if (GET_MODE (op) != VOIDmode)
883 if (!VECTOR_MODE_P (GET_MODE (op)))
884 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
885 else
886 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
887 (GET_MODE (op)));
889 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
890 || GET_CODE (op) == CONST_VECTOR)
892 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
893 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
894 rtvec v = rtvec_alloc (n_elts);
895 unsigned int i;
897 if (GET_CODE (op) != CONST_VECTOR)
898 for (i = 0; i < n_elts; i++)
899 RTVEC_ELT (v, i) = op;
900 else
902 enum machine_mode inmode = GET_MODE (op);
903 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
904 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
906 gcc_assert (in_n_elts < n_elts);
907 gcc_assert ((n_elts % in_n_elts) == 0);
908 for (i = 0; i < n_elts; i++)
909 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
911 return gen_rtx_CONST_VECTOR (mode, v);
915 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
917 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
918 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
919 enum machine_mode opmode = GET_MODE (op);
920 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
921 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
922 rtvec v = rtvec_alloc (n_elts);
923 unsigned int i;
925 gcc_assert (op_n_elts == n_elts);
926 for (i = 0; i < n_elts; i++)
928 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
929 CONST_VECTOR_ELT (op, i),
930 GET_MODE_INNER (opmode));
931 if (!x)
932 return 0;
933 RTVEC_ELT (v, i) = x;
935 return gen_rtx_CONST_VECTOR (mode, v);
938 /* The order of these tests is critical so that, for example, we don't
939 check the wrong mode (input vs. output) for a conversion operation,
940 such as FIX. At some point, this should be simplified. */
942 if (code == FLOAT && GET_MODE (op) == VOIDmode
943 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
945 HOST_WIDE_INT hv, lv;
946 REAL_VALUE_TYPE d;
948 if (GET_CODE (op) == CONST_INT)
949 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
950 else
951 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
953 REAL_VALUE_FROM_INT (d, lv, hv, mode);
954 d = real_value_truncate (mode, d);
955 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
957 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
958 && (GET_CODE (op) == CONST_DOUBLE
959 || GET_CODE (op) == CONST_INT))
961 HOST_WIDE_INT hv, lv;
962 REAL_VALUE_TYPE d;
964 if (GET_CODE (op) == CONST_INT)
965 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
966 else
967 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
969 if (op_mode == VOIDmode)
971 /* We don't know how to interpret negative-looking numbers in
972 this case, so don't try to fold those. */
973 if (hv < 0)
974 return 0;
976 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
978 else
979 hv = 0, lv &= GET_MODE_MASK (op_mode);
981 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
982 d = real_value_truncate (mode, d);
983 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
986 if (GET_CODE (op) == CONST_INT
987 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
989 HOST_WIDE_INT arg0 = INTVAL (op);
990 HOST_WIDE_INT val;
992 switch (code)
994 case NOT:
995 val = ~ arg0;
996 break;
998 case NEG:
999 val = - arg0;
1000 break;
1002 case ABS:
1003 val = (arg0 >= 0 ? arg0 : - arg0);
1004 break;
1006 case FFS:
1007 /* Don't use ffs here. Instead, get low order bit and then its
1008 number. If arg0 is zero, this will return 0, as desired. */
1009 arg0 &= GET_MODE_MASK (mode);
1010 val = exact_log2 (arg0 & (- arg0)) + 1;
1011 break;
1013 case CLZ:
1014 arg0 &= GET_MODE_MASK (mode);
1015 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1017 else
1018 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1019 break;
1021 case CTZ:
1022 arg0 &= GET_MODE_MASK (mode);
1023 if (arg0 == 0)
1025 /* Even if the value at zero is undefined, we have to come
1026 up with some replacement. Seems good enough. */
1027 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1028 val = GET_MODE_BITSIZE (mode);
1030 else
1031 val = exact_log2 (arg0 & -arg0);
1032 break;
1034 case POPCOUNT:
1035 arg0 &= GET_MODE_MASK (mode);
1036 val = 0;
1037 while (arg0)
1038 val++, arg0 &= arg0 - 1;
1039 break;
1041 case PARITY:
1042 arg0 &= GET_MODE_MASK (mode);
1043 val = 0;
1044 while (arg0)
1045 val++, arg0 &= arg0 - 1;
1046 val &= 1;
1047 break;
1049 case BSWAP:
1050 return 0;
1052 case TRUNCATE:
1053 val = arg0;
1054 break;
1056 case ZERO_EXTEND:
1057 /* When zero-extending a CONST_INT, we need to know its
1058 original mode. */
1059 gcc_assert (op_mode != VOIDmode);
1060 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1062 /* If we were really extending the mode,
1063 we would have to distinguish between zero-extension
1064 and sign-extension. */
1065 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1066 val = arg0;
1068 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1069 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1070 else
1071 return 0;
1072 break;
1074 case SIGN_EXTEND:
1075 if (op_mode == VOIDmode)
1076 op_mode = mode;
1077 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1079 /* If we were really extending the mode,
1080 we would have to distinguish between zero-extension
1081 and sign-extension. */
1082 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1083 val = arg0;
1085 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1088 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1089 if (val
1090 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1091 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1093 else
1094 return 0;
1095 break;
1097 case SQRT:
1098 case FLOAT_EXTEND:
1099 case FLOAT_TRUNCATE:
1100 case SS_TRUNCATE:
1101 case US_TRUNCATE:
1102 case SS_NEG:
1103 return 0;
1105 default:
1106 gcc_unreachable ();
1109 return gen_int_mode (val, mode);
1112 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1113 for a DImode operation on a CONST_INT. */
1114 else if (GET_MODE (op) == VOIDmode
1115 && width <= HOST_BITS_PER_WIDE_INT * 2
1116 && (GET_CODE (op) == CONST_DOUBLE
1117 || GET_CODE (op) == CONST_INT))
1119 unsigned HOST_WIDE_INT l1, lv;
1120 HOST_WIDE_INT h1, hv;
1122 if (GET_CODE (op) == CONST_DOUBLE)
1123 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1124 else
1125 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1127 switch (code)
1129 case NOT:
1130 lv = ~ l1;
1131 hv = ~ h1;
1132 break;
1134 case NEG:
1135 neg_double (l1, h1, &lv, &hv);
1136 break;
1138 case ABS:
1139 if (h1 < 0)
1140 neg_double (l1, h1, &lv, &hv);
1141 else
1142 lv = l1, hv = h1;
1143 break;
1145 case FFS:
1146 hv = 0;
1147 if (l1 == 0)
1149 if (h1 == 0)
1150 lv = 0;
1151 else
1152 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1154 else
1155 lv = exact_log2 (l1 & -l1) + 1;
1156 break;
1158 case CLZ:
1159 hv = 0;
1160 if (h1 != 0)
1161 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1162 - HOST_BITS_PER_WIDE_INT;
1163 else if (l1 != 0)
1164 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1165 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1166 lv = GET_MODE_BITSIZE (mode);
1167 break;
1169 case CTZ:
1170 hv = 0;
1171 if (l1 != 0)
1172 lv = exact_log2 (l1 & -l1);
1173 else if (h1 != 0)
1174 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1175 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1176 lv = GET_MODE_BITSIZE (mode);
1177 break;
1179 case POPCOUNT:
1180 hv = 0;
1181 lv = 0;
1182 while (l1)
1183 lv++, l1 &= l1 - 1;
1184 while (h1)
1185 lv++, h1 &= h1 - 1;
1186 break;
1188 case PARITY:
1189 hv = 0;
1190 lv = 0;
1191 while (l1)
1192 lv++, l1 &= l1 - 1;
1193 while (h1)
1194 lv++, h1 &= h1 - 1;
1195 lv &= 1;
1196 break;
1198 case TRUNCATE:
1199 /* This is just a change-of-mode, so do nothing. */
1200 lv = l1, hv = h1;
1201 break;
1203 case ZERO_EXTEND:
1204 gcc_assert (op_mode != VOIDmode);
1206 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1207 return 0;
1209 hv = 0;
1210 lv = l1 & GET_MODE_MASK (op_mode);
1211 break;
1213 case SIGN_EXTEND:
1214 if (op_mode == VOIDmode
1215 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1216 return 0;
1217 else
1219 lv = l1 & GET_MODE_MASK (op_mode);
1220 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1221 && (lv & ((HOST_WIDE_INT) 1
1222 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1223 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1225 hv = HWI_SIGN_EXTEND (lv);
1227 break;
1229 case SQRT:
1230 return 0;
1232 default:
1233 return 0;
1236 return immed_double_const (lv, hv, mode);
1239 else if (GET_CODE (op) == CONST_DOUBLE
1240 && SCALAR_FLOAT_MODE_P (mode))
1242 REAL_VALUE_TYPE d, t;
1243 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1245 switch (code)
1247 case SQRT:
1248 if (HONOR_SNANS (mode) && real_isnan (&d))
1249 return 0;
1250 real_sqrt (&t, mode, &d);
1251 d = t;
1252 break;
1253 case ABS:
1254 d = REAL_VALUE_ABS (d);
1255 break;
1256 case NEG:
1257 d = REAL_VALUE_NEGATE (d);
1258 break;
1259 case FLOAT_TRUNCATE:
1260 d = real_value_truncate (mode, d);
1261 break;
1262 case FLOAT_EXTEND:
1263 /* All this does is change the mode. */
1264 break;
1265 case FIX:
1266 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1267 break;
1268 case NOT:
1270 long tmp[4];
1271 int i;
1273 real_to_target (tmp, &d, GET_MODE (op));
1274 for (i = 0; i < 4; i++)
1275 tmp[i] = ~tmp[i];
1276 real_from_target (&d, tmp, mode);
1277 break;
1279 default:
1280 gcc_unreachable ();
1282 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1285 else if (GET_CODE (op) == CONST_DOUBLE
1286 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1287 && GET_MODE_CLASS (mode) == MODE_INT
1288 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1290 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1291 operators are intentionally left unspecified (to ease implementation
1292 by target backends), for consistency, this routine implements the
1293 same semantics for constant folding as used by the middle-end. */
1295 /* This was formerly used only for non-IEEE float.
1296 eggert@twinsun.com says it is safe for IEEE also. */
1297 HOST_WIDE_INT xh, xl, th, tl;
1298 REAL_VALUE_TYPE x, t;
1299 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1300 switch (code)
1302 case FIX:
1303 if (REAL_VALUE_ISNAN (x))
1304 return const0_rtx;
1306 /* Test against the signed upper bound. */
1307 if (width > HOST_BITS_PER_WIDE_INT)
1309 th = ((unsigned HOST_WIDE_INT) 1
1310 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1311 tl = -1;
1313 else
1315 th = 0;
1316 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1318 real_from_integer (&t, VOIDmode, tl, th, 0);
1319 if (REAL_VALUES_LESS (t, x))
1321 xh = th;
1322 xl = tl;
1323 break;
1326 /* Test against the signed lower bound. */
1327 if (width > HOST_BITS_PER_WIDE_INT)
1329 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1330 tl = 0;
1332 else
1334 th = -1;
1335 tl = (HOST_WIDE_INT) -1 << (width - 1);
1337 real_from_integer (&t, VOIDmode, tl, th, 0);
1338 if (REAL_VALUES_LESS (x, t))
1340 xh = th;
1341 xl = tl;
1342 break;
1344 REAL_VALUE_TO_INT (&xl, &xh, x);
1345 break;
1347 case UNSIGNED_FIX:
1348 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1349 return const0_rtx;
1351 /* Test against the unsigned upper bound. */
1352 if (width == 2*HOST_BITS_PER_WIDE_INT)
1354 th = -1;
1355 tl = -1;
1357 else if (width >= HOST_BITS_PER_WIDE_INT)
1359 th = ((unsigned HOST_WIDE_INT) 1
1360 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1361 tl = -1;
1363 else
1365 th = 0;
1366 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1368 real_from_integer (&t, VOIDmode, tl, th, 1);
1369 if (REAL_VALUES_LESS (t, x))
1371 xh = th;
1372 xl = tl;
1373 break;
1376 REAL_VALUE_TO_INT (&xl, &xh, x);
1377 break;
1379 default:
1380 gcc_unreachable ();
1382 return immed_double_const (xl, xh, mode);
1385 return NULL_RTX;
1388 /* Subroutine of simplify_binary_operation to simplify a commutative,
1389 associative binary operation CODE with result mode MODE, operating
1390 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1391 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1392 canonicalization is possible. */
1394 static rtx
1395 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1396 rtx op0, rtx op1)
1398 rtx tem;
1400 /* Linearize the operator to the left. */
1401 if (GET_CODE (op1) == code)
1403 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1404 if (GET_CODE (op0) == code)
1406 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1407 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1410 /* "a op (b op c)" becomes "(b op c) op a". */
1411 if (! swap_commutative_operands_p (op1, op0))
1412 return simplify_gen_binary (code, mode, op1, op0);
1414 tem = op0;
1415 op0 = op1;
1416 op1 = tem;
1419 if (GET_CODE (op0) == code)
1421 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1422 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1424 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1425 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1428 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1432 if (tem != 0)
1433 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1435 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1436 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1437 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1438 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1439 if (tem != 0)
1440 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1443 return 0;
1447 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1448 and OP1. Return 0 if no simplification is possible.
1450 Don't use this for relational operations such as EQ or LT.
1451 Use simplify_relational_operation instead. */
1453 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1454 rtx op0, rtx op1)
1456 rtx trueop0, trueop1;
1457 rtx tem;
1459 /* Relational operations don't work here. We must know the mode
1460 of the operands in order to do the comparison correctly.
1461 Assuming a full word can give incorrect results.
1462 Consider comparing 128 with -128 in QImode. */
1463 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1464 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1466 /* Make sure the constant is second. */
1467 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1468 && swap_commutative_operands_p (op0, op1))
1470 tem = op0, op0 = op1, op1 = tem;
1473 trueop0 = avoid_constant_pool_reference (op0);
1474 trueop1 = avoid_constant_pool_reference (op1);
1476 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1477 if (tem)
1478 return tem;
1479 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1482 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1483 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1484 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1485 actual constants. */
1487 static rtx
1488 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1489 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1491 rtx tem, reversed, opleft, opright;
1492 HOST_WIDE_INT val;
1493 unsigned int width = GET_MODE_BITSIZE (mode);
1495 /* Even if we can't compute a constant result,
1496 there are some cases worth simplifying. */
1498 switch (code)
1500 case PLUS:
1501 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1502 when x is NaN, infinite, or finite and nonzero. They aren't
1503 when x is -0 and the rounding mode is not towards -infinity,
1504 since (-0) + 0 is then 0. */
1505 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1506 return op0;
1508 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1509 transformations are safe even for IEEE. */
1510 if (GET_CODE (op0) == NEG)
1511 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1512 else if (GET_CODE (op1) == NEG)
1513 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1515 /* (~a) + 1 -> -a */
1516 if (INTEGRAL_MODE_P (mode)
1517 && GET_CODE (op0) == NOT
1518 && trueop1 == const1_rtx)
1519 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1521 /* Handle both-operands-constant cases. We can only add
1522 CONST_INTs to constants since the sum of relocatable symbols
1523 can't be handled by most assemblers. Don't add CONST_INT
1524 to CONST_INT since overflow won't be computed properly if wider
1525 than HOST_BITS_PER_WIDE_INT. */
1527 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1528 && GET_CODE (op1) == CONST_INT)
1529 return plus_constant (op0, INTVAL (op1));
1530 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1531 && GET_CODE (op0) == CONST_INT)
1532 return plus_constant (op1, INTVAL (op0));
1534 /* See if this is something like X * C - X or vice versa or
1535 if the multiplication is written as a shift. If so, we can
1536 distribute and make a new multiply, shift, or maybe just
1537 have X (if C is 2 in the example above). But don't make
1538 something more expensive than we had before. */
1540 if (SCALAR_INT_MODE_P (mode))
1542 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1543 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1544 rtx lhs = op0, rhs = op1;
1546 if (GET_CODE (lhs) == NEG)
1548 coeff0l = -1;
1549 coeff0h = -1;
1550 lhs = XEXP (lhs, 0);
1552 else if (GET_CODE (lhs) == MULT
1553 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1555 coeff0l = INTVAL (XEXP (lhs, 1));
1556 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1557 lhs = XEXP (lhs, 0);
1559 else if (GET_CODE (lhs) == ASHIFT
1560 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1561 && INTVAL (XEXP (lhs, 1)) >= 0
1562 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1564 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1565 coeff0h = 0;
1566 lhs = XEXP (lhs, 0);
1569 if (GET_CODE (rhs) == NEG)
1571 coeff1l = -1;
1572 coeff1h = -1;
1573 rhs = XEXP (rhs, 0);
1575 else if (GET_CODE (rhs) == MULT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1578 coeff1l = INTVAL (XEXP (rhs, 1));
1579 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1580 rhs = XEXP (rhs, 0);
1582 else if (GET_CODE (rhs) == ASHIFT
1583 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1584 && INTVAL (XEXP (rhs, 1)) >= 0
1585 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1587 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1588 coeff1h = 0;
1589 rhs = XEXP (rhs, 0);
1592 if (rtx_equal_p (lhs, rhs))
1594 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1595 rtx coeff;
1596 unsigned HOST_WIDE_INT l;
1597 HOST_WIDE_INT h;
1599 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1600 coeff = immed_double_const (l, h, mode);
1602 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1603 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1604 ? tem : 0;
1608 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1609 if ((GET_CODE (op1) == CONST_INT
1610 || GET_CODE (op1) == CONST_DOUBLE)
1611 && GET_CODE (op0) == XOR
1612 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1613 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1614 && mode_signbit_p (mode, op1))
1615 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1616 simplify_gen_binary (XOR, mode, op1,
1617 XEXP (op0, 1)));
1619 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1620 if (GET_CODE (op0) == MULT
1621 && GET_CODE (XEXP (op0, 0)) == NEG)
1623 rtx in1, in2;
1625 in1 = XEXP (XEXP (op0, 0), 0);
1626 in2 = XEXP (op0, 1);
1627 return simplify_gen_binary (MINUS, mode, op1,
1628 simplify_gen_binary (MULT, mode,
1629 in1, in2));
1632 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1633 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1634 is 1. */
1635 if (COMPARISON_P (op0)
1636 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1637 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1638 && (reversed = reversed_comparison (op0, mode)))
1639 return
1640 simplify_gen_unary (NEG, mode, reversed, mode);
1642 /* If one of the operands is a PLUS or a MINUS, see if we can
1643 simplify this by the associative law.
1644 Don't use the associative law for floating point.
1645 The inaccuracy makes it nonassociative,
1646 and subtle programs can break if operations are associated. */
1648 if (INTEGRAL_MODE_P (mode)
1649 && (plus_minus_operand_p (op0)
1650 || plus_minus_operand_p (op1))
1651 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1652 return tem;
1654 /* Reassociate floating point addition only when the user
1655 specifies unsafe math optimizations. */
1656 if (FLOAT_MODE_P (mode)
1657 && flag_unsafe_math_optimizations)
1659 tem = simplify_associative_operation (code, mode, op0, op1);
1660 if (tem)
1661 return tem;
1663 break;
1665 case COMPARE:
1666 #ifdef HAVE_cc0
1667 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1668 using cc0, in which case we want to leave it as a COMPARE
1669 so we can distinguish it from a register-register-copy.
1671 In IEEE floating point, x-0 is not the same as x. */
1673 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1674 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1675 && trueop1 == CONST0_RTX (mode))
1676 return op0;
1677 #endif
1679 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1680 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1681 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1682 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1684 rtx xop00 = XEXP (op0, 0);
1685 rtx xop10 = XEXP (op1, 0);
1687 #ifdef HAVE_cc0
1688 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1689 #else
1690 if (REG_P (xop00) && REG_P (xop10)
1691 && GET_MODE (xop00) == GET_MODE (xop10)
1692 && REGNO (xop00) == REGNO (xop10)
1693 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1694 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1695 #endif
1696 return xop00;
1698 break;
1700 case MINUS:
1701 /* We can't assume x-x is 0 even with non-IEEE floating point,
1702 but since it is zero except in very strange circumstances, we
1703 will treat it as zero with -funsafe-math-optimizations. */
1704 if (rtx_equal_p (trueop0, trueop1)
1705 && ! side_effects_p (op0)
1706 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1707 return CONST0_RTX (mode);
1709 /* Change subtraction from zero into negation. (0 - x) is the
1710 same as -x when x is NaN, infinite, or finite and nonzero.
1711 But if the mode has signed zeros, and does not round towards
1712 -infinity, then 0 - 0 is 0, not -0. */
1713 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1714 return simplify_gen_unary (NEG, mode, op1, mode);
1716 /* (-1 - a) is ~a. */
1717 if (trueop0 == constm1_rtx)
1718 return simplify_gen_unary (NOT, mode, op1, mode);
1720 /* Subtracting 0 has no effect unless the mode has signed zeros
1721 and supports rounding towards -infinity. In such a case,
1722 0 - 0 is -0. */
1723 if (!(HONOR_SIGNED_ZEROS (mode)
1724 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1725 && trueop1 == CONST0_RTX (mode))
1726 return op0;
1728 /* See if this is something like X * C - X or vice versa or
1729 if the multiplication is written as a shift. If so, we can
1730 distribute and make a new multiply, shift, or maybe just
1731 have X (if C is 2 in the example above). But don't make
1732 something more expensive than we had before. */
1734 if (SCALAR_INT_MODE_P (mode))
1736 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1737 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1738 rtx lhs = op0, rhs = op1;
1740 if (GET_CODE (lhs) == NEG)
1742 coeff0l = -1;
1743 coeff0h = -1;
1744 lhs = XEXP (lhs, 0);
1746 else if (GET_CODE (lhs) == MULT
1747 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1749 coeff0l = INTVAL (XEXP (lhs, 1));
1750 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1751 lhs = XEXP (lhs, 0);
1753 else if (GET_CODE (lhs) == ASHIFT
1754 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1755 && INTVAL (XEXP (lhs, 1)) >= 0
1756 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1758 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1759 coeff0h = 0;
1760 lhs = XEXP (lhs, 0);
1763 if (GET_CODE (rhs) == NEG)
1765 negcoeff1l = 1;
1766 negcoeff1h = 0;
1767 rhs = XEXP (rhs, 0);
1769 else if (GET_CODE (rhs) == MULT
1770 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1772 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1773 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1774 rhs = XEXP (rhs, 0);
1776 else if (GET_CODE (rhs) == ASHIFT
1777 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1778 && INTVAL (XEXP (rhs, 1)) >= 0
1779 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1781 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1782 negcoeff1h = -1;
1783 rhs = XEXP (rhs, 0);
1786 if (rtx_equal_p (lhs, rhs))
1788 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1789 rtx coeff;
1790 unsigned HOST_WIDE_INT l;
1791 HOST_WIDE_INT h;
1793 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1794 coeff = immed_double_const (l, h, mode);
1796 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1797 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1798 ? tem : 0;
1802 /* (a - (-b)) -> (a + b). True even for IEEE. */
1803 if (GET_CODE (op1) == NEG)
1804 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1806 /* (-x - c) may be simplified as (-c - x). */
1807 if (GET_CODE (op0) == NEG
1808 && (GET_CODE (op1) == CONST_INT
1809 || GET_CODE (op1) == CONST_DOUBLE))
1811 tem = simplify_unary_operation (NEG, mode, op1, mode);
1812 if (tem)
1813 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1816 /* Don't let a relocatable value get a negative coeff. */
1817 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1818 return simplify_gen_binary (PLUS, mode,
1819 op0,
1820 neg_const_int (mode, op1));
1822 /* (x - (x & y)) -> (x & ~y) */
1823 if (GET_CODE (op1) == AND)
1825 if (rtx_equal_p (op0, XEXP (op1, 0)))
1827 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1828 GET_MODE (XEXP (op1, 1)));
1829 return simplify_gen_binary (AND, mode, op0, tem);
1831 if (rtx_equal_p (op0, XEXP (op1, 1)))
1833 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1834 GET_MODE (XEXP (op1, 0)));
1835 return simplify_gen_binary (AND, mode, op0, tem);
1839 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1840 by reversing the comparison code if valid. */
1841 if (STORE_FLAG_VALUE == 1
1842 && trueop0 == const1_rtx
1843 && COMPARISON_P (op1)
1844 && (reversed = reversed_comparison (op1, mode)))
1845 return reversed;
1847 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1848 if (GET_CODE (op1) == MULT
1849 && GET_CODE (XEXP (op1, 0)) == NEG)
1851 rtx in1, in2;
1853 in1 = XEXP (XEXP (op1, 0), 0);
1854 in2 = XEXP (op1, 1);
1855 return simplify_gen_binary (PLUS, mode,
1856 simplify_gen_binary (MULT, mode,
1857 in1, in2),
1858 op0);
1861 /* Canonicalize (minus (neg A) (mult B C)) to
1862 (minus (mult (neg B) C) A). */
1863 if (GET_CODE (op1) == MULT
1864 && GET_CODE (op0) == NEG)
1866 rtx in1, in2;
1868 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1869 in2 = XEXP (op1, 1);
1870 return simplify_gen_binary (MINUS, mode,
1871 simplify_gen_binary (MULT, mode,
1872 in1, in2),
1873 XEXP (op0, 0));
1876 /* If one of the operands is a PLUS or a MINUS, see if we can
1877 simplify this by the associative law. This will, for example,
1878 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1879 Don't use the associative law for floating point.
1880 The inaccuracy makes it nonassociative,
1881 and subtle programs can break if operations are associated. */
1883 if (INTEGRAL_MODE_P (mode)
1884 && (plus_minus_operand_p (op0)
1885 || plus_minus_operand_p (op1))
1886 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1887 return tem;
1888 break;
1890 case MULT:
1891 if (trueop1 == constm1_rtx)
1892 return simplify_gen_unary (NEG, mode, op0, mode);
1894 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1895 x is NaN, since x * 0 is then also NaN. Nor is it valid
1896 when the mode has signed zeros, since multiplying a negative
1897 number by 0 will give -0, not 0. */
1898 if (!HONOR_NANS (mode)
1899 && !HONOR_SIGNED_ZEROS (mode)
1900 && trueop1 == CONST0_RTX (mode)
1901 && ! side_effects_p (op0))
1902 return op1;
1904 /* In IEEE floating point, x*1 is not equivalent to x for
1905 signalling NaNs. */
1906 if (!HONOR_SNANS (mode)
1907 && trueop1 == CONST1_RTX (mode))
1908 return op0;
1910 /* Convert multiply by constant power of two into shift unless
1911 we are still generating RTL. This test is a kludge. */
1912 if (GET_CODE (trueop1) == CONST_INT
1913 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1914 /* If the mode is larger than the host word size, and the
1915 uppermost bit is set, then this isn't a power of two due
1916 to implicit sign extension. */
1917 && (width <= HOST_BITS_PER_WIDE_INT
1918 || val != HOST_BITS_PER_WIDE_INT - 1))
1919 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
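/* For example, (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)), since 8 == 1 << 3.  */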
1921 /* Likewise for multipliers wider than a word. */
1922 if (GET_CODE (trueop1) == CONST_DOUBLE
1923 && (GET_MODE (trueop1) == VOIDmode
1924 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1925 && GET_MODE (op0) == mode
1926 && CONST_DOUBLE_LOW (trueop1) == 0
1927 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1928 return simplify_gen_binary (ASHIFT, mode, op0,
1929 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1931 /* x*2 is x+x and x*(-1) is -x */
1932 if (GET_CODE (trueop1) == CONST_DOUBLE
1933 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1934 && GET_MODE (op0) == mode)
1936 REAL_VALUE_TYPE d;
1937 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1939 if (REAL_VALUES_EQUAL (d, dconst2))
1940 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1942 if (!HONOR_SNANS (mode)
1943 && REAL_VALUES_EQUAL (d, dconstm1))
1944 return simplify_gen_unary (NEG, mode, op0, mode);
1947 /* Optimize -x * -x as x * x. */
1948 if (FLOAT_MODE_P (mode)
1949 && GET_CODE (op0) == NEG
1950 && GET_CODE (op1) == NEG
1951 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1952 && !side_effects_p (XEXP (op0, 0)))
1953 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1955 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1956 if (SCALAR_FLOAT_MODE_P (mode)
1957 && GET_CODE (op0) == ABS
1958 && GET_CODE (op1) == ABS
1959 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1960 && !side_effects_p (XEXP (op0, 0)))
1961 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1963 /* Reassociate multiplication, but for floating point MULTs
1964 only when the user specifies unsafe math optimizations. */
1965 if (! FLOAT_MODE_P (mode)
1966 || flag_unsafe_math_optimizations)
1968 tem = simplify_associative_operation (code, mode, op0, op1);
1969 if (tem)
1970 return tem;
1972 break;
1974 case IOR:
1975 if (trueop1 == const0_rtx)
1976 return op0;
1977 if (GET_CODE (trueop1) == CONST_INT
1978 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1979 == GET_MODE_MASK (mode)))
1980 return op1;
1981 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1982 return op0;
1983 /* A | (~A) -> -1 */
1984 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1985 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1986 && ! side_effects_p (op0)
1987 && SCALAR_INT_MODE_P (mode))
1988 return constm1_rtx;
1990 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1991 if (GET_CODE (op1) == CONST_INT
1992 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1993 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1994 return op1;
1996 /* Convert (A & B) | A to A. */
1997 if (GET_CODE (op0) == AND
1998 && (rtx_equal_p (XEXP (op0, 0), op1)
1999 || rtx_equal_p (XEXP (op0, 1), op1))
2000 && ! side_effects_p (XEXP (op0, 0))
2001 && ! side_effects_p (XEXP (op0, 1)))
2002 return op1;
2004 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2005 mode size to (rotate A CX). */
2007 if (GET_CODE (op1) == ASHIFT
2008 || GET_CODE (op1) == SUBREG)
2010 opleft = op1;
2011 opright = op0;
2013 else
2015 opright = op1;
2016 opleft = op0;
2019 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2020 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2021 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2022 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2023 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2024 == GET_MODE_BITSIZE (mode)))
2025 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
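/* For example, in SImode (32 bits),
   (ior:SI (ashift:SI X (const_int 24)) (lshiftrt:SI X (const_int 8)))
   becomes (rotate:SI X (const_int 24)), because 24 + 8 == 32.  */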
2027 /* Same, but for ashift that has been "simplified" to a wider mode
2028 by simplify_shift_const. */
2030 if (GET_CODE (opleft) == SUBREG
2031 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2032 && GET_CODE (opright) == LSHIFTRT
2033 && GET_CODE (XEXP (opright, 0)) == SUBREG
2034 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2035 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2036 && (GET_MODE_SIZE (GET_MODE (opleft))
2037 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2038 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2039 SUBREG_REG (XEXP (opright, 0)))
2040 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2041 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2042 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2043 == GET_MODE_BITSIZE (mode)))
2044 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2045 XEXP (SUBREG_REG (opleft), 1));
2047 /* If we have (ior (and X C1) C2), simplify this by making
2048 C1 as small as possible if C1 actually changes. */
2049 if (GET_CODE (op1) == CONST_INT
2050 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2051 || INTVAL (op1) > 0)
2052 && GET_CODE (op0) == AND
2053 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2054 && GET_CODE (op1) == CONST_INT
2055 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2056 return simplify_gen_binary (IOR, mode,
2057 simplify_gen_binary
2058 (AND, mode, XEXP (op0, 0),
2059 GEN_INT (INTVAL (XEXP (op0, 1))
2060 & ~INTVAL (op1))),
2061 op1);
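/* For example, (ior:SI (and:SI X (const_int 0xff)) (const_int 0x0f))
   becomes (ior:SI (and:SI X (const_int 0xf0)) (const_int 0x0f));
   the bits of C1 already covered by C2 are dropped from the AND mask.  */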
2063 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2064 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2065 the PLUS does not affect any of the bits in OP1: then we can do
2066 the IOR as a PLUS and we can associate. This is valid if OP1
2067 can be safely shifted left C bits. */
2068 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2069 && GET_CODE (XEXP (op0, 0)) == PLUS
2070 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2071 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2072 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2074 int count = INTVAL (XEXP (op0, 1));
2075 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2077 if (mask >> count == INTVAL (trueop1)
2078 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2079 return simplify_gen_binary (ASHIFTRT, mode,
2080 plus_constant (XEXP (op0, 0), mask),
2081 XEXP (op0, 1));
2084 tem = simplify_associative_operation (code, mode, op0, op1);
2085 if (tem)
2086 return tem;
2087 break;
2089 case XOR:
2090 if (trueop1 == const0_rtx)
2091 return op0;
2092 if (GET_CODE (trueop1) == CONST_INT
2093 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2094 == GET_MODE_MASK (mode)))
2095 return simplify_gen_unary (NOT, mode, op0, mode);
2096 if (rtx_equal_p (trueop0, trueop1)
2097 && ! side_effects_p (op0)
2098 && GET_MODE_CLASS (mode) != MODE_CC)
2099 return CONST0_RTX (mode);
2101 /* Canonicalize XOR of the most significant bit to PLUS. */
2102 if ((GET_CODE (op1) == CONST_INT
2103 || GET_CODE (op1) == CONST_DOUBLE)
2104 && mode_signbit_p (mode, op1))
2105 return simplify_gen_binary (PLUS, mode, op0, op1);
2106 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2107 if ((GET_CODE (op1) == CONST_INT
2108 || GET_CODE (op1) == CONST_DOUBLE)
2109 && GET_CODE (op0) == PLUS
2110 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2111 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2112 && mode_signbit_p (mode, XEXP (op0, 1)))
2113 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2114 simplify_gen_binary (XOR, mode, op1,
2115 XEXP (op0, 1)));
2117 /* If we are XORing two things that have no bits in common,
2118 convert them into an IOR. This helps to detect rotation encoded
2119 using those methods and possibly other simplifications. */
2121 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2122 && (nonzero_bits (op0, mode)
2123 & nonzero_bits (op1, mode)) == 0)
2124 return (simplify_gen_binary (IOR, mode, op0, op1));
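/* For example, when nonzero_bits shows the operands cannot overlap, as in
   (xor (and X (const_int 0x0f)) (and Y (const_int 0xf0))), the XOR is
   rewritten as the equivalent IOR of the same operands.  */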
2126 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2127 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2128 (NOT y). */
2130 int num_negated = 0;
2132 if (GET_CODE (op0) == NOT)
2133 num_negated++, op0 = XEXP (op0, 0);
2134 if (GET_CODE (op1) == NOT)
2135 num_negated++, op1 = XEXP (op1, 0);
2137 if (num_negated == 2)
2138 return simplify_gen_binary (XOR, mode, op0, op1);
2139 else if (num_negated == 1)
2140 return simplify_gen_unary (NOT, mode,
2141 simplify_gen_binary (XOR, mode, op0, op1),
2142 mode);
2145 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2146 correspond to a machine insn or result in further simplifications
2147 if B is a constant. */
2149 if (GET_CODE (op0) == AND
2150 && rtx_equal_p (XEXP (op0, 1), op1)
2151 && ! side_effects_p (op1))
2152 return simplify_gen_binary (AND, mode,
2153 simplify_gen_unary (NOT, mode,
2154 XEXP (op0, 0), mode),
2155 op1);
2157 else if (GET_CODE (op0) == AND
2158 && rtx_equal_p (XEXP (op0, 0), op1)
2159 && ! side_effects_p (op1))
2160 return simplify_gen_binary (AND, mode,
2161 simplify_gen_unary (NOT, mode,
2162 XEXP (op0, 1), mode),
2163 op1);
2165 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2166 comparison if STORE_FLAG_VALUE is 1. */
2167 if (STORE_FLAG_VALUE == 1
2168 && trueop1 == const1_rtx
2169 && COMPARISON_P (op0)
2170 && (reversed = reversed_comparison (op0, mode)))
2171 return reversed;
2173 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2174 is (lt foo (const_int 0)), so we can perform the above
2175 simplification if STORE_FLAG_VALUE is 1. */
2177 if (STORE_FLAG_VALUE == 1
2178 && trueop1 == const1_rtx
2179 && GET_CODE (op0) == LSHIFTRT
2180 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2181 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2182 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2184 /* (xor (comparison foo bar) (const_int sign-bit))
2185 when STORE_FLAG_VALUE is the sign bit. */
2186 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2187 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2188 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2189 && trueop1 == const_true_rtx
2190 && COMPARISON_P (op0)
2191 && (reversed = reversed_comparison (op0, mode)))
2192 return reversed;
2196 tem = simplify_associative_operation (code, mode, op0, op1);
2197 if (tem)
2198 return tem;
2199 break;
2201 case AND:
2202 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2203 return trueop1;
2204 /* If we are turning off bits already known off in OP0, we need
2205 not do an AND. */
2206 if (GET_CODE (trueop1) == CONST_INT
2207 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2208 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2209 return op0;
2210 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2211 && GET_MODE_CLASS (mode) != MODE_CC)
2212 return op0;
2213 /* A & (~A) -> 0 */
2214 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2215 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2216 && ! side_effects_p (op0)
2217 && GET_MODE_CLASS (mode) != MODE_CC)
2218 return CONST0_RTX (mode);
2220 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2221 there are no nonzero bits of C outside of X's mode. */
2222 if ((GET_CODE (op0) == SIGN_EXTEND
2223 || GET_CODE (op0) == ZERO_EXTEND)
2224 && GET_CODE (trueop1) == CONST_INT
2225 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2226 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2227 & INTVAL (trueop1)) == 0)
2229 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2230 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2231 gen_int_mode (INTVAL (trueop1),
2232 imode));
2233 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
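/* For example, if X has QImode, (and:SI (sign_extend:SI X) (const_int 0x7f))
   becomes (zero_extend:SI (and:QI X (const_int 0x7f))): the mask clears
   every bit that sign extension could have copied from QImode's sign bit.  */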
2236 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2237 insn (and may simplify more). */
2238 if (GET_CODE (op0) == XOR
2239 && rtx_equal_p (XEXP (op0, 0), op1)
2240 && ! side_effects_p (op1))
2241 return simplify_gen_binary (AND, mode,
2242 simplify_gen_unary (NOT, mode,
2243 XEXP (op0, 1), mode),
2244 op1);
2246 if (GET_CODE (op0) == XOR
2247 && rtx_equal_p (XEXP (op0, 1), op1)
2248 && ! side_effects_p (op1))
2249 return simplify_gen_binary (AND, mode,
2250 simplify_gen_unary (NOT, mode,
2251 XEXP (op0, 0), mode),
2252 op1);
2254 /* Similarly for (~(A ^ B)) & A. */
2255 if (GET_CODE (op0) == NOT
2256 && GET_CODE (XEXP (op0, 0)) == XOR
2257 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2258 && ! side_effects_p (op1))
2259 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2261 if (GET_CODE (op0) == NOT
2262 && GET_CODE (XEXP (op0, 0)) == XOR
2263 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2264 && ! side_effects_p (op1))
2265 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2267 /* Convert (A | B) & A to A. */
2268 if (GET_CODE (op0) == IOR
2269 && (rtx_equal_p (XEXP (op0, 0), op1)
2270 || rtx_equal_p (XEXP (op0, 1), op1))
2271 && ! side_effects_p (XEXP (op0, 0))
2272 && ! side_effects_p (XEXP (op0, 1)))
2273 return op1;
2275 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2276 ((A & N) + B) & M -> (A + B) & M
2277 Similarly if (N & M) == 0,
2278 ((A | N) + B) & M -> (A + B) & M
2279 and for - instead of + and/or ^ instead of |. */
2280 if (GET_CODE (trueop1) == CONST_INT
2281 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2282 && ~INTVAL (trueop1)
2283 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2284 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2286 rtx pmop[2];
2287 int which;
2289 pmop[0] = XEXP (op0, 0);
2290 pmop[1] = XEXP (op0, 1);
2292 for (which = 0; which < 2; which++)
2294 tem = pmop[which];
2295 switch (GET_CODE (tem))
2297 case AND:
2298 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2300 == INTVAL (trueop1))
2301 pmop[which] = XEXP (tem, 0);
2302 break;
2303 case IOR:
2304 case XOR:
2305 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2306 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2307 pmop[which] = XEXP (tem, 0);
2308 break;
2309 default:
2310 break;
2314 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2316 tem = simplify_gen_binary (GET_CODE (op0), mode,
2317 pmop[0], pmop[1]);
2318 return simplify_gen_binary (code, mode, tem, op1);
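/* For instance, with M == 0xff and N == 0xff00 (so N & M == 0),
   (and (plus (ior A (const_int 0xff00)) B) (const_int 0xff)) becomes
   (and (plus A B) (const_int 0xff)): the IOR only touches bits that the
   final mask discards anyway.  */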
2321 tem = simplify_associative_operation (code, mode, op0, op1);
2322 if (tem)
2323 return tem;
2324 break;
2326 case UDIV:
2327 /* 0/x is 0 (or x&0 if x has side-effects). */
2328 if (trueop0 == CONST0_RTX (mode))
2330 if (side_effects_p (op1))
2331 return simplify_gen_binary (AND, mode, op1, trueop0);
2332 return trueop0;
2334 /* x/1 is x. */
2335 if (trueop1 == CONST1_RTX (mode))
2336 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2337 /* Convert divide by power of two into shift. */
2338 if (GET_CODE (trueop1) == CONST_INT
2339 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2340 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2341 break;
2343 case DIV:
2344 /* Handle floating point and integers separately. */
2345 if (SCALAR_FLOAT_MODE_P (mode))
2347 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2348 safe for modes with NaNs, since 0.0 / 0.0 will then be
2349 NaN rather than 0.0. Nor is it safe for modes with signed
2350 zeros, since dividing 0 by a negative number gives -0.0. */
2351 if (trueop0 == CONST0_RTX (mode)
2352 && !HONOR_NANS (mode)
2353 && !HONOR_SIGNED_ZEROS (mode)
2354 && ! side_effects_p (op1))
2355 return op0;
2356 /* x/1.0 is x. */
2357 if (trueop1 == CONST1_RTX (mode)
2358 && !HONOR_SNANS (mode))
2359 return op0;
2361 if (GET_CODE (trueop1) == CONST_DOUBLE
2362 && trueop1 != CONST0_RTX (mode))
2364 REAL_VALUE_TYPE d;
2365 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2367 /* x/-1.0 is -x. */
2368 if (REAL_VALUES_EQUAL (d, dconstm1)
2369 && !HONOR_SNANS (mode))
2370 return simplify_gen_unary (NEG, mode, op0, mode);
2372 /* Change FP division by a constant into multiplication.
2373 Only do this with -funsafe-math-optimizations. */
2374 if (flag_unsafe_math_optimizations
2375 && !REAL_VALUES_EQUAL (d, dconst0))
2377 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2378 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2379 return simplify_gen_binary (MULT, mode, op0, tem);
2383 else
2385 /* 0/x is 0 (or x&0 if x has side-effects). */
2386 if (trueop0 == CONST0_RTX (mode))
2388 if (side_effects_p (op1))
2389 return simplify_gen_binary (AND, mode, op1, trueop0);
2390 return trueop0;
2392 /* x/1 is x. */
2393 if (trueop1 == CONST1_RTX (mode))
2394 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2395 /* x/-1 is -x. */
2396 if (trueop1 == constm1_rtx)
2398 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2399 return simplify_gen_unary (NEG, mode, x, mode);
2402 break;
2404 case UMOD:
2405 /* 0%x is 0 (or x&0 if x has side-effects). */
2406 if (trueop0 == CONST0_RTX (mode))
2408 if (side_effects_p (op1))
2409 return simplify_gen_binary (AND, mode, op1, trueop0);
2410 return trueop0;
2412 /* x%1 is 0 (or x&0 if x has side-effects). */
2413 if (trueop1 == CONST1_RTX (mode))
2415 if (side_effects_p (op0))
2416 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2417 return CONST0_RTX (mode);
2419 /* Implement modulus by power of two as AND. */
2420 if (GET_CODE (trueop1) == CONST_INT
2421 && exact_log2 (INTVAL (trueop1)) > 0)
2422 return simplify_gen_binary (AND, mode, op0,
2423 GEN_INT (INTVAL (op1) - 1));
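/* For example, (umod:SI X (const_int 8)) becomes (and:SI X (const_int 7)),
   since the remainder of an unsigned value modulo 8 is just its low
   three bits.  */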
2424 break;
2426 case MOD:
2427 /* 0%x is 0 (or x&0 if x has side-effects). */
2428 if (trueop0 == CONST0_RTX (mode))
2430 if (side_effects_p (op1))
2431 return simplify_gen_binary (AND, mode, op1, trueop0);
2432 return trueop0;
2434 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2435 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2437 if (side_effects_p (op0))
2438 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2439 return CONST0_RTX (mode);
2441 break;
2443 case ROTATERT:
2444 case ROTATE:
2445 case ASHIFTRT:
2446 if (trueop1 == CONST0_RTX (mode))
2447 return op0;
2448 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2449 return op0;
2450 /* Rotating ~0 always results in ~0. */
2451 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2452 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2453 && ! side_effects_p (op1))
2454 return op0;
2455 break;
2457 case ASHIFT:
2458 case SS_ASHIFT:
2459 if (trueop1 == CONST0_RTX (mode))
2460 return op0;
2461 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2462 return op0;
2463 break;
2465 case LSHIFTRT:
2466 if (trueop1 == CONST0_RTX (mode))
2467 return op0;
2468 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2469 return op0;
2470 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2471 if (GET_CODE (op0) == CLZ
2472 && GET_CODE (trueop1) == CONST_INT
2473 && STORE_FLAG_VALUE == 1
2474 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2476 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2477 unsigned HOST_WIDE_INT zero_val = 0;
2479 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2480 && zero_val == GET_MODE_BITSIZE (imode)
2481 && INTVAL (trueop1) == exact_log2 (zero_val))
2482 return simplify_gen_relational (EQ, mode, imode,
2483 XEXP (op0, 0), const0_rtx);
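/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO gives 32 for
   SImode, (lshiftrt (clz:SI X) (const_int 5)) is nonzero only when the
   CLZ result is 32, i.e. when X is zero, so it becomes (eq X (const_int 0)).  */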
2485 break;
2487 case SMIN:
2488 if (width <= HOST_BITS_PER_WIDE_INT
2489 && GET_CODE (trueop1) == CONST_INT
2490 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2491 && ! side_effects_p (op0))
2492 return op1;
2493 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2494 return op0;
2495 tem = simplify_associative_operation (code, mode, op0, op1);
2496 if (tem)
2497 return tem;
2498 break;
2500 case SMAX:
2501 if (width <= HOST_BITS_PER_WIDE_INT
2502 && GET_CODE (trueop1) == CONST_INT
2503 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2504 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2505 && ! side_effects_p (op0))
2506 return op1;
2507 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2508 return op0;
2509 tem = simplify_associative_operation (code, mode, op0, op1);
2510 if (tem)
2511 return tem;
2512 break;
2514 case UMIN:
2515 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2516 return op1;
2517 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2518 return op0;
2519 tem = simplify_associative_operation (code, mode, op0, op1);
2520 if (tem)
2521 return tem;
2522 break;
2524 case UMAX:
2525 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2526 return op1;
2527 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2528 return op0;
2529 tem = simplify_associative_operation (code, mode, op0, op1);
2530 if (tem)
2531 return tem;
2532 break;
2534 case SS_PLUS:
2535 case US_PLUS:
2536 case SS_MINUS:
2537 case US_MINUS:
2538 /* ??? There are simplifications that can be done. */
2539 return 0;
2541 case VEC_SELECT:
2542 if (!VECTOR_MODE_P (mode))
2544 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2545 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2546 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2547 gcc_assert (XVECLEN (trueop1, 0) == 1);
2548 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2550 if (GET_CODE (trueop0) == CONST_VECTOR)
2551 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2552 (trueop1, 0, 0)));
2554 else
2556 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2557 gcc_assert (GET_MODE_INNER (mode)
2558 == GET_MODE_INNER (GET_MODE (trueop0)));
2559 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2561 if (GET_CODE (trueop0) == CONST_VECTOR)
2563 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2564 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2565 rtvec v = rtvec_alloc (n_elts);
2566 unsigned int i;
2568 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2569 for (i = 0; i < n_elts; i++)
2571 rtx x = XVECEXP (trueop1, 0, i);
2573 gcc_assert (GET_CODE (x) == CONST_INT);
2574 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2575 INTVAL (x));
2578 return gen_rtx_CONST_VECTOR (mode, v);
2582 if (XVECLEN (trueop1, 0) == 1
2583 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2584 && GET_CODE (trueop0) == VEC_CONCAT)
2586 rtx vec = trueop0;
2587 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2589 /* Try to find the element in the VEC_CONCAT. */
2590 while (GET_MODE (vec) != mode
2591 && GET_CODE (vec) == VEC_CONCAT)
2593 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2594 if (offset < vec_size)
2595 vec = XEXP (vec, 0);
2596 else
2598 offset -= vec_size;
2599 vec = XEXP (vec, 1);
2601 vec = avoid_constant_pool_reference (vec);
2604 if (GET_MODE (vec) == mode)
2605 return vec;
2608 return 0;
2609 case VEC_CONCAT:
2611 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2612 ? GET_MODE (trueop0)
2613 : GET_MODE_INNER (mode));
2614 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2615 ? GET_MODE (trueop1)
2616 : GET_MODE_INNER (mode));
2618 gcc_assert (VECTOR_MODE_P (mode));
2619 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2620 == GET_MODE_SIZE (mode));
2622 if (VECTOR_MODE_P (op0_mode))
2623 gcc_assert (GET_MODE_INNER (mode)
2624 == GET_MODE_INNER (op0_mode));
2625 else
2626 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2628 if (VECTOR_MODE_P (op1_mode))
2629 gcc_assert (GET_MODE_INNER (mode)
2630 == GET_MODE_INNER (op1_mode));
2631 else
2632 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2634 if ((GET_CODE (trueop0) == CONST_VECTOR
2635 || GET_CODE (trueop0) == CONST_INT
2636 || GET_CODE (trueop0) == CONST_DOUBLE)
2637 && (GET_CODE (trueop1) == CONST_VECTOR
2638 || GET_CODE (trueop1) == CONST_INT
2639 || GET_CODE (trueop1) == CONST_DOUBLE))
2641 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2643 rtvec v = rtvec_alloc (n_elts);
2644 unsigned int i;
2645 unsigned in_n_elts = 1;
2647 if (VECTOR_MODE_P (op0_mode))
2648 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2649 for (i = 0; i < n_elts; i++)
2651 if (i < in_n_elts)
2653 if (!VECTOR_MODE_P (op0_mode))
2654 RTVEC_ELT (v, i) = trueop0;
2655 else
2656 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2658 else
2660 if (!VECTOR_MODE_P (op1_mode))
2661 RTVEC_ELT (v, i) = trueop1;
2662 else
2663 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2664 i - in_n_elts);
2668 return gen_rtx_CONST_VECTOR (mode, v);
2671 return 0;
2673 default:
2674 gcc_unreachable ();
2677 return 0;
2681 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2682 rtx op0, rtx op1)
2684 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2685 HOST_WIDE_INT val;
2686 unsigned int width = GET_MODE_BITSIZE (mode);
2688 if (VECTOR_MODE_P (mode)
2689 && code != VEC_CONCAT
2690 && GET_CODE (op0) == CONST_VECTOR
2691 && GET_CODE (op1) == CONST_VECTOR)
2693 unsigned n_elts = GET_MODE_NUNITS (mode);
2694 enum machine_mode op0mode = GET_MODE (op0);
2695 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2696 enum machine_mode op1mode = GET_MODE (op1);
2697 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2698 rtvec v = rtvec_alloc (n_elts);
2699 unsigned int i;
2701 gcc_assert (op0_n_elts == n_elts);
2702 gcc_assert (op1_n_elts == n_elts);
2703 for (i = 0; i < n_elts; i++)
2705 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2706 CONST_VECTOR_ELT (op0, i),
2707 CONST_VECTOR_ELT (op1, i));
2708 if (!x)
2709 return 0;
2710 RTVEC_ELT (v, i) = x;
2713 return gen_rtx_CONST_VECTOR (mode, v);
2716 if (VECTOR_MODE_P (mode)
2717 && code == VEC_CONCAT
2718 && CONSTANT_P (op0) && CONSTANT_P (op1))
2720 unsigned n_elts = GET_MODE_NUNITS (mode);
2721 rtvec v = rtvec_alloc (n_elts);
2723 gcc_assert (n_elts >= 2);
2724 if (n_elts == 2)
2726 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2727 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2729 RTVEC_ELT (v, 0) = op0;
2730 RTVEC_ELT (v, 1) = op1;
2732 else
2734 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2735 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2736 unsigned i;
2738 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2739 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2740 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2742 for (i = 0; i < op0_n_elts; ++i)
2743 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2744 for (i = 0; i < op1_n_elts; ++i)
2745 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2748 return gen_rtx_CONST_VECTOR (mode, v);
2751 if (SCALAR_FLOAT_MODE_P (mode)
2752 && GET_CODE (op0) == CONST_DOUBLE
2753 && GET_CODE (op1) == CONST_DOUBLE
2754 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2756 if (code == AND
2757 || code == IOR
2758 || code == XOR)
2760 long tmp0[4];
2761 long tmp1[4];
2762 REAL_VALUE_TYPE r;
2763 int i;
2765 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2766 GET_MODE (op0));
2767 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2768 GET_MODE (op1));
2769 for (i = 0; i < 4; i++)
2771 switch (code)
2773 case AND:
2774 tmp0[i] &= tmp1[i];
2775 break;
2776 case IOR:
2777 tmp0[i] |= tmp1[i];
2778 break;
2779 case XOR:
2780 tmp0[i] ^= tmp1[i];
2781 break;
2782 default:
2783 gcc_unreachable ();
2786 real_from_target (&r, tmp0, mode);
2787 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2789 else
2791 REAL_VALUE_TYPE f0, f1, value, result;
2792 bool inexact;
2794 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2795 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2796 real_convert (&f0, mode, &f0);
2797 real_convert (&f1, mode, &f1);
2799 if (HONOR_SNANS (mode)
2800 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2801 return 0;
2803 if (code == DIV
2804 && REAL_VALUES_EQUAL (f1, dconst0)
2805 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2806 return 0;
2808 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2809 && flag_trapping_math
2810 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2812 int s0 = REAL_VALUE_NEGATIVE (f0);
2813 int s1 = REAL_VALUE_NEGATIVE (f1);
2815 switch (code)
2817 case PLUS:
2818 /* Inf + -Inf = NaN plus exception. */
2819 if (s0 != s1)
2820 return 0;
2821 break;
2822 case MINUS:
2823 /* Inf - Inf = NaN plus exception. */
2824 if (s0 == s1)
2825 return 0;
2826 break;
2827 case DIV:
2828 /* Inf / Inf = NaN plus exception. */
2829 return 0;
2830 default:
2831 break;
2835 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2836 && flag_trapping_math
2837 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2838 || (REAL_VALUE_ISINF (f1)
2839 && REAL_VALUES_EQUAL (f0, dconst0))))
2840 /* Inf * 0 = NaN plus exception. */
2841 return 0;
2843 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2844 &f0, &f1);
2845 real_convert (&result, mode, &value);
2847 /* Don't constant fold this floating point operation if
2848 the result has overflowed and flag_trapping_math is set. */
2850 if (flag_trapping_math
2851 && MODE_HAS_INFINITIES (mode)
2852 && REAL_VALUE_ISINF (result)
2853 && !REAL_VALUE_ISINF (f0)
2854 && !REAL_VALUE_ISINF (f1))
2855 /* Overflow plus exception. */
2856 return 0;
2858 /* Don't constant fold this floating point operation if the
2859 result may depend upon the run-time rounding mode and
2860 flag_rounding_math is set, or if GCC's software emulation
2861 is unable to accurately represent the result. */
2863 if ((flag_rounding_math
2864 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2865 && !flag_unsafe_math_optimizations))
2866 && (inexact || !real_identical (&result, &value)))
2867 return NULL_RTX;
2869 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2873 /* We can fold some multi-word operations. */
2874 if (GET_MODE_CLASS (mode) == MODE_INT
2875 && width == HOST_BITS_PER_WIDE_INT * 2
2876 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2877 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2879 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2880 HOST_WIDE_INT h1, h2, hv, ht;
2882 if (GET_CODE (op0) == CONST_DOUBLE)
2883 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2884 else
2885 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2887 if (GET_CODE (op1) == CONST_DOUBLE)
2888 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2889 else
2890 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2892 switch (code)
2894 case MINUS:
2895 /* A - B == A + (-B). */
2896 neg_double (l2, h2, &lv, &hv);
2897 l2 = lv, h2 = hv;
2899 /* Fall through.... */
2901 case PLUS:
2902 add_double (l1, h1, l2, h2, &lv, &hv);
2903 break;
2905 case MULT:
2906 mul_double (l1, h1, l2, h2, &lv, &hv);
2907 break;
2909 case DIV:
2910 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2911 &lv, &hv, &lt, &ht))
2912 return 0;
2913 break;
2915 case MOD:
2916 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2917 &lt, &ht, &lv, &hv))
2918 return 0;
2919 break;
2921 case UDIV:
2922 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2923 &lv, &hv, &lt, &ht))
2924 return 0;
2925 break;
2927 case UMOD:
2928 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2929 &lt, &ht, &lv, &hv))
2930 return 0;
2931 break;
2933 case AND:
2934 lv = l1 & l2, hv = h1 & h2;
2935 break;
2937 case IOR:
2938 lv = l1 | l2, hv = h1 | h2;
2939 break;
2941 case XOR:
2942 lv = l1 ^ l2, hv = h1 ^ h2;
2943 break;
2945 case SMIN:
2946 if (h1 < h2
2947 || (h1 == h2
2948 && ((unsigned HOST_WIDE_INT) l1
2949 < (unsigned HOST_WIDE_INT) l2)))
2950 lv = l1, hv = h1;
2951 else
2952 lv = l2, hv = h2;
2953 break;
2955 case SMAX:
2956 if (h1 > h2
2957 || (h1 == h2
2958 && ((unsigned HOST_WIDE_INT) l1
2959 > (unsigned HOST_WIDE_INT) l2)))
2960 lv = l1, hv = h1;
2961 else
2962 lv = l2, hv = h2;
2963 break;
2965 case UMIN:
2966 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2967 || (h1 == h2
2968 && ((unsigned HOST_WIDE_INT) l1
2969 < (unsigned HOST_WIDE_INT) l2)))
2970 lv = l1, hv = h1;
2971 else
2972 lv = l2, hv = h2;
2973 break;
2975 case UMAX:
2976 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2977 || (h1 == h2
2978 && ((unsigned HOST_WIDE_INT) l1
2979 > (unsigned HOST_WIDE_INT) l2)))
2980 lv = l1, hv = h1;
2981 else
2982 lv = l2, hv = h2;
2983 break;
2985 case LSHIFTRT: case ASHIFTRT:
2986 case ASHIFT:
2987 case ROTATE: case ROTATERT:
2988 if (SHIFT_COUNT_TRUNCATED)
2989 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2991 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2992 return 0;
2994 if (code == LSHIFTRT || code == ASHIFTRT)
2995 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2996 code == ASHIFTRT);
2997 else if (code == ASHIFT)
2998 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2999 else if (code == ROTATE)
3000 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3001 else /* code == ROTATERT */
3002 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3003 break;
3005 default:
3006 return 0;
3009 return immed_double_const (lv, hv, mode);
3012 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3013 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3015 /* Get the integer argument values in two forms:
3016 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3018 arg0 = INTVAL (op0);
3019 arg1 = INTVAL (op1);
3021 if (width < HOST_BITS_PER_WIDE_INT)
3023 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3024 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3026 arg0s = arg0;
3027 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3028 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3030 arg1s = arg1;
3031 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3032 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3034 else
3036 arg0s = arg0;
3037 arg1s = arg1;
3040 /* Compute the value of the arithmetic. */
3042 switch (code)
3044 case PLUS:
3045 val = arg0s + arg1s;
3046 break;
3048 case MINUS:
3049 val = arg0s - arg1s;
3050 break;
3052 case MULT:
3053 val = arg0s * arg1s;
3054 break;
3056 case DIV:
3057 if (arg1s == 0
3058 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3059 && arg1s == -1))
3060 return 0;
3061 val = arg0s / arg1s;
3062 break;
3064 case MOD:
3065 if (arg1s == 0
3066 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3067 && arg1s == -1))
3068 return 0;
3069 val = arg0s % arg1s;
3070 break;
3072 case UDIV:
3073 if (arg1 == 0
3074 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3075 && arg1s == -1))
3076 return 0;
3077 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3078 break;
3080 case UMOD:
3081 if (arg1 == 0
3082 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3083 && arg1s == -1))
3084 return 0;
3085 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3086 break;
3088 case AND:
3089 val = arg0 & arg1;
3090 break;
3092 case IOR:
3093 val = arg0 | arg1;
3094 break;
3096 case XOR:
3097 val = arg0 ^ arg1;
3098 break;
3100 case LSHIFTRT:
3101 case ASHIFT:
3102 case ASHIFTRT:
3103 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3104 the value is in range. We can't return any old value for
3105 out-of-range arguments because either the middle-end (via
3106 shift_truncation_mask) or the back-end might be relying on
3107 target-specific knowledge. Nor can we rely on
3108 shift_truncation_mask, since the shift might not be part of an
3109 ashlM3, lshrM3 or ashrM3 instruction. */
3110 if (SHIFT_COUNT_TRUNCATED)
3111 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3112 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3113 return 0;
3115 val = (code == ASHIFT
3116 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3117 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3119 /* Sign-extend the result for arithmetic right shifts. */
3120 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3121 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3122 break;
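/* For example, folding (ashiftrt 0xf0 2) in an 8-bit mode: the logical
   shift above yields 0x3c, and the sign-extension step sets the top two
   bits, giving 0xfc, which is -16 >> 2 == -4 as a signed 8-bit value.  */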
3124 case ROTATERT:
3125 if (arg1 < 0)
3126 return 0;
3128 arg1 %= width;
3129 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3130 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3131 break;
3133 case ROTATE:
3134 if (arg1 < 0)
3135 return 0;
3137 arg1 %= width;
3138 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3139 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3140 break;
3142 case COMPARE:
3143 /* Do nothing here. */
3144 return 0;
3146 case SMIN:
3147 val = arg0s <= arg1s ? arg0s : arg1s;
3148 break;
3150 case UMIN:
3151 val = ((unsigned HOST_WIDE_INT) arg0
3152 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3153 break;
3155 case SMAX:
3156 val = arg0s > arg1s ? arg0s : arg1s;
3157 break;
3159 case UMAX:
3160 val = ((unsigned HOST_WIDE_INT) arg0
3161 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3162 break;
3164 case SS_PLUS:
3165 case US_PLUS:
3166 case SS_MINUS:
3167 case US_MINUS:
3168 case SS_ASHIFT:
3169 /* ??? There are simplifications that can be done. */
3170 return 0;
3172 default:
3173 gcc_unreachable ();
3176 return gen_int_mode (val, mode);
3179 return NULL_RTX;
3184 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3185 PLUS or MINUS.
3187 Rather than test for specific cases, we do this by a brute-force method
3188 and do all possible simplifications until no more changes occur. Then
3189 we rebuild the operation. */
3191 struct simplify_plus_minus_op_data
3193 rtx op;
3194 short neg;
3197 static int
3198 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3200 const struct simplify_plus_minus_op_data *d1 = p1;
3201 const struct simplify_plus_minus_op_data *d2 = p2;
3202 int result;
3204 result = (commutative_operand_precedence (d2->op)
3205 - commutative_operand_precedence (d1->op));
3206 if (result)
3207 return result;
3209 /* Group together equal REGs to do more simplification. */
3210 if (REG_P (d1->op) && REG_P (d2->op))
3211 return REGNO (d1->op) - REGNO (d2->op);
3212 else
3213 return 0;
3216 static rtx
3217 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3218 rtx op1)
3220 struct simplify_plus_minus_op_data ops[8];
3221 rtx result, tem;
3222 int n_ops = 2, input_ops = 2;
3223 int changed, n_constants = 0, canonicalized = 0;
3224 int i, j;
3226 memset (ops, 0, sizeof ops);
3228 /* Set up the two operands and then expand them until nothing has been
3229 changed. If we run out of room in our array, give up; this should
3230 almost never happen. */
3232 ops[0].op = op0;
3233 ops[0].neg = 0;
3234 ops[1].op = op1;
3235 ops[1].neg = (code == MINUS);
3239 changed = 0;
3241 for (i = 0; i < n_ops; i++)
3243 rtx this_op = ops[i].op;
3244 int this_neg = ops[i].neg;
3245 enum rtx_code this_code = GET_CODE (this_op);
3247 switch (this_code)
3249 case PLUS:
3250 case MINUS:
3251 if (n_ops == 7)
3252 return NULL_RTX;
3254 ops[n_ops].op = XEXP (this_op, 1);
3255 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3256 n_ops++;
3258 ops[i].op = XEXP (this_op, 0);
3259 input_ops++;
3260 changed = 1;
3261 canonicalized |= this_neg;
3262 break;
3264 case NEG:
3265 ops[i].op = XEXP (this_op, 0);
3266 ops[i].neg = ! this_neg;
3267 changed = 1;
3268 canonicalized = 1;
3269 break;
3271 case CONST:
3272 if (n_ops < 7
3273 && GET_CODE (XEXP (this_op, 0)) == PLUS
3274 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3275 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3277 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3278 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3279 ops[n_ops].neg = this_neg;
3280 n_ops++;
3281 changed = 1;
3282 canonicalized = 1;
3284 break;
3286 case NOT:
3287 /* ~a -> (-a - 1) */
3288 if (n_ops != 7)
3290 ops[n_ops].op = constm1_rtx;
3291 ops[n_ops++].neg = this_neg;
3292 ops[i].op = XEXP (this_op, 0);
3293 ops[i].neg = !this_neg;
3294 changed = 1;
3295 canonicalized = 1;
3297 break;
3299 case CONST_INT:
3300 n_constants++;
3301 if (this_neg)
3303 ops[i].op = neg_const_int (mode, this_op);
3304 ops[i].neg = 0;
3305 changed = 1;
3306 canonicalized = 1;
3308 break;
3310 default:
3311 break;
3315 while (changed);
3317 if (n_constants > 1)
3318 canonicalized = 1;
3320 gcc_assert (n_ops >= 2);
3322 /* If we only have two operands, we can avoid the loops. */
3323 if (n_ops == 2)
3325 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3326 rtx lhs, rhs;
3328 /* Get the two operands. Be careful with the order, especially for
3329 the cases where code == MINUS. */
3330 if (ops[0].neg && ops[1].neg)
3332 lhs = gen_rtx_NEG (mode, ops[0].op);
3333 rhs = ops[1].op;
3335 else if (ops[0].neg)
3337 lhs = ops[1].op;
3338 rhs = ops[0].op;
3340 else
3342 lhs = ops[0].op;
3343 rhs = ops[1].op;
3346 return simplify_const_binary_operation (code, mode, lhs, rhs);
3349 /* Now simplify each pair of operands until nothing changes. */
3352 /* Insertion sort is good enough for an eight-element array. */
3353 for (i = 1; i < n_ops; i++)
3355 struct simplify_plus_minus_op_data save;
3356 j = i - 1;
3357 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3358 continue;
3360 canonicalized = 1;
3361 save = ops[i];
3363 ops[j + 1] = ops[j];
3364 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3365 ops[j + 1] = save;
3368 /* This is only useful the first time through. */
3369 if (!canonicalized)
3370 return NULL_RTX;
3372 changed = 0;
3373 for (i = n_ops - 1; i > 0; i--)
3374 for (j = i - 1; j >= 0; j--)
3376 rtx lhs = ops[j].op, rhs = ops[i].op;
3377 int lneg = ops[j].neg, rneg = ops[i].neg;
3379 if (lhs != 0 && rhs != 0)
3381 enum rtx_code ncode = PLUS;
3383 if (lneg != rneg)
3385 ncode = MINUS;
3386 if (lneg)
3387 tem = lhs, lhs = rhs, rhs = tem;
3389 else if (swap_commutative_operands_p (lhs, rhs))
3390 tem = lhs, lhs = rhs, rhs = tem;
3392 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3393 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3395 rtx tem_lhs, tem_rhs;
3397 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3398 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3399 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3401 if (tem && !CONSTANT_P (tem))
3402 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3404 else
3405 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3407 /* Reject "simplifications" that just wrap the two
3408 arguments in a CONST. Failure to do so can result
3409 in infinite recursion with simplify_binary_operation
3410 when it calls us to simplify CONST operations. */
3411 if (tem
3412 && ! (GET_CODE (tem) == CONST
3413 && GET_CODE (XEXP (tem, 0)) == ncode
3414 && XEXP (XEXP (tem, 0), 0) == lhs
3415 && XEXP (XEXP (tem, 0), 1) == rhs))
3417 lneg &= rneg;
3418 if (GET_CODE (tem) == NEG)
3419 tem = XEXP (tem, 0), lneg = !lneg;
3420 if (GET_CODE (tem) == CONST_INT && lneg)
3421 tem = neg_const_int (mode, tem), lneg = 0;
3423 ops[i].op = tem;
3424 ops[i].neg = lneg;
3425 ops[j].op = NULL_RTX;
3426 changed = 1;
3431 /* Pack all the operands to the lower-numbered entries. */
3432 for (i = 0, j = 0; j < n_ops; j++)
3433 if (ops[j].op)
3435 ops[i] = ops[j];
3436 i++;
3438 n_ops = i;
3440 while (changed);
3442 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3443 if (n_ops == 2
3444 && GET_CODE (ops[1].op) == CONST_INT
3445 && CONSTANT_P (ops[0].op)
3446 && ops[0].neg)
3447 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3449 /* We suppressed creation of trivial CONST expressions in the
3450 combination loop to avoid recursion. Create one manually now.
3451 The combination loop should have ensured that there is exactly
3452 one CONST_INT, and the sort will have ensured that it is last
3453 in the array and that any other constant will be next-to-last. */
3455 if (n_ops > 1
3456 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3457 && CONSTANT_P (ops[n_ops - 2].op))
3459 rtx value = ops[n_ops - 1].op;
3460 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3461 value = neg_const_int (mode, value);
3462 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3463 n_ops--;
3466 /* Put a non-negated operand first, if possible. */
3468 for (i = 0; i < n_ops && ops[i].neg; i++)
3469 continue;
3470 if (i == n_ops)
3471 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3472 else if (i != 0)
3474 tem = ops[0].op;
3475 ops[0] = ops[i];
3476 ops[i].op = tem;
3477 ops[i].neg = 1;
3480 /* Now make the result by performing the requested operations. */
3481 result = ops[0].op;
3482 for (i = 1; i < n_ops; i++)
3483 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3484 mode, result, ops[i].op);
3486 return result;
3489 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3490 static bool
3491 plus_minus_operand_p (rtx x)
3493 return GET_CODE (x) == PLUS
3494 || GET_CODE (x) == MINUS
3495 || (GET_CODE (x) == CONST
3496 && GET_CODE (XEXP (x, 0)) == PLUS
3497 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3498 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3501 /* Like simplify_binary_operation except used for relational operators.
3502 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3503 not both be VOIDmode.
3505 CMP_MODE specifies the mode in which the comparison is done, so it is
3506 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3507 the operands or, if both are VOIDmode, the operands are compared in
3508 "infinite precision". */
3510 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3511 enum machine_mode cmp_mode, rtx op0, rtx op1)
3513 rtx tem, trueop0, trueop1;
3515 if (cmp_mode == VOIDmode)
3516 cmp_mode = GET_MODE (op0);
3517 if (cmp_mode == VOIDmode)
3518 cmp_mode = GET_MODE (op1);
3520 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3521 if (tem)
3523 if (SCALAR_FLOAT_MODE_P (mode))
3525 if (tem == const0_rtx)
3526 return CONST0_RTX (mode);
3527 #ifdef FLOAT_STORE_FLAG_VALUE
3529 REAL_VALUE_TYPE val;
3530 val = FLOAT_STORE_FLAG_VALUE (mode);
3531 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3533 #else
3534 return NULL_RTX;
3535 #endif
3537 if (VECTOR_MODE_P (mode))
3539 if (tem == const0_rtx)
3540 return CONST0_RTX (mode);
3541 #ifdef VECTOR_STORE_FLAG_VALUE
3543 int i, units;
3544 rtvec v;
3546 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3547 if (val == NULL_RTX)
3548 return NULL_RTX;
3549 if (val == const1_rtx)
3550 return CONST1_RTX (mode);
3552 units = GET_MODE_NUNITS (mode);
3553 v = rtvec_alloc (units);
3554 for (i = 0; i < units; i++)
3555 RTVEC_ELT (v, i) = val;
3556 return gen_rtx_raw_CONST_VECTOR (mode, v);
3558 #else
3559 return NULL_RTX;
3560 #endif
3563 return tem;
3566 /* For the following tests, ensure const0_rtx is op1. */
3567 if (swap_commutative_operands_p (op0, op1)
3568 || (op0 == const0_rtx && op1 != const0_rtx))
3569 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3571 /* If op0 is a compare, extract the comparison arguments from it. */
3572 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3573 return simplify_relational_operation (code, mode, VOIDmode,
3574 XEXP (op0, 0), XEXP (op0, 1));
3576 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3577 || CC0_P (op0))
3578 return NULL_RTX;
3580 trueop0 = avoid_constant_pool_reference (op0);
3581 trueop1 = avoid_constant_pool_reference (op1);
3582 return simplify_relational_operation_1 (code, mode, cmp_mode,
3583 trueop0, trueop1);
3586 /* This part of simplify_relational_operation is only used when CMP_MODE
3587 is not in class MODE_CC (i.e. it is a real comparison).
3589 MODE is the mode of the result, while CMP_MODE specifies the mode in
3590 which the comparison is done, so it is the mode of the operands. */
3592 static rtx
3593 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3594 enum machine_mode cmp_mode, rtx op0, rtx op1)
3596 enum rtx_code op0code = GET_CODE (op0);
3598 if (op1 == const0_rtx && COMPARISON_P (op0))
3600 /* If op0 is a comparison, extract the comparison arguments
3601 from it. */
3602 if (code == NE)
3604 if (GET_MODE (op0) == mode)
3605 return simplify_rtx (op0);
3606 else
3607 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3608 XEXP (op0, 0), XEXP (op0, 1));
3610 else if (code == EQ)
3612 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3613 if (new_code != UNKNOWN)
3614 return simplify_gen_relational (new_code, mode, VOIDmode,
3615 XEXP (op0, 0), XEXP (op0, 1));
3619 if (op1 == const0_rtx)
3621 /* Canonicalize (GTU x 0) as (NE x 0). */
3622 if (code == GTU)
3623 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3624 /* Canonicalize (LEU x 0) as (EQ x 0). */
3625 if (code == LEU)
3626 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
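/* Both canonicalizations hold because no unsigned value is below zero:
   x > 0 is exactly x != 0, and x <= 0 is exactly x == 0.  */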
3628 else if (op1 == const1_rtx)
3630 switch (code)
3632 case GE:
3633 /* Canonicalize (GE x 1) as (GT x 0). */
3634 return simplify_gen_relational (GT, mode, cmp_mode,
3635 op0, const0_rtx);
3636 case GEU:
3637 /* Canonicalize (GEU x 1) as (NE x 0). */
3638 return simplify_gen_relational (NE, mode, cmp_mode,
3639 op0, const0_rtx);
3640 case LT:
3641 /* Canonicalize (LT x 1) as (LE x 0). */
3642 return simplify_gen_relational (LE, mode, cmp_mode,
3643 op0, const0_rtx);
3644 case LTU:
3645 /* Canonicalize (LTU x 1) as (EQ x 0). */
3646 return simplify_gen_relational (EQ, mode, cmp_mode,
3647 op0, const0_rtx);
3648 default:
3649 break;
3652 else if (op1 == constm1_rtx)
3654 /* Canonicalize (LE x -1) as (LT x 0). */
3655 if (code == LE)
3656 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3657 /* Canonicalize (GT x -1) as (GE x 0). */
3658 if (code == GT)
3659 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3662 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3663 if ((code == EQ || code == NE)
3664 && (op0code == PLUS || op0code == MINUS)
3665 && CONSTANT_P (op1)
3666 && CONSTANT_P (XEXP (op0, 1))
3667 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3669 rtx x = XEXP (op0, 0);
3670 rtx c = XEXP (op0, 1);
3672 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3673 cmp_mode, op1, c);
3674 return simplify_gen_relational (code, mode, cmp_mode, x, c);
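/* For example, (eq (plus X (const_int 3)) (const_int 10)) becomes
   (eq X (const_int 7)).  */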
3677 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3678 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3679 if (code == NE
3680 && op1 == const0_rtx
3681 && GET_MODE_CLASS (mode) == MODE_INT
3682 && cmp_mode != VOIDmode
3683 /* ??? Work-around BImode bugs in the ia64 backend. */
3684 && mode != BImode
3685 && cmp_mode != BImode
3686 && nonzero_bits (op0, cmp_mode) == 1
3687 && STORE_FLAG_VALUE == 1)
3688 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3689 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3690 : lowpart_subreg (mode, op0, cmp_mode);
3692 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3693 if ((code == EQ || code == NE)
3694 && op1 == const0_rtx
3695 && op0code == XOR)
3696 return simplify_gen_relational (code, mode, cmp_mode,
3697 XEXP (op0, 0), XEXP (op0, 1));
3699 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3700 if ((code == EQ || code == NE)
3701 && op0code == XOR
3702 && rtx_equal_p (XEXP (op0, 0), op1)
3703 && !side_effects_p (XEXP (op0, 0)))
3704 return simplify_gen_relational (code, mode, cmp_mode,
3705 XEXP (op0, 1), const0_rtx);
3707 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3708 if ((code == EQ || code == NE)
3709 && op0code == XOR
3710 && rtx_equal_p (XEXP (op0, 1), op1)
3711 && !side_effects_p (XEXP (op0, 1)))
3712 return simplify_gen_relational (code, mode, cmp_mode,
3713 XEXP (op0, 0), const0_rtx);
3715 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3716 if ((code == EQ || code == NE)
3717 && op0code == XOR
3718 && (GET_CODE (op1) == CONST_INT
3719 || GET_CODE (op1) == CONST_DOUBLE)
3720 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3721 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3722 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3723 simplify_gen_binary (XOR, cmp_mode,
3724 XEXP (op0, 1), op1));
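/* For example, (eq (xor X (const_int 5)) (const_int 12)) becomes
   (eq X (const_int 9)), since 5 ^ 12 == 9.  */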
3726 return NULL_RTX;
3729 /* Check if the given comparison (done in the given MODE) is actually a
3730 tautology or a contradiction.
3731 If no simplification is possible, this function returns zero.
3732 Otherwise, it returns either const_true_rtx or const0_rtx. */
3735 simplify_const_relational_operation (enum rtx_code code,
3736 enum machine_mode mode,
3737 rtx op0, rtx op1)
3739 int equal, op0lt, op0ltu, op1lt, op1ltu;
3740 rtx tem;
3741 rtx trueop0;
3742 rtx trueop1;
3744 gcc_assert (mode != VOIDmode
3745 || (GET_MODE (op0) == VOIDmode
3746 && GET_MODE (op1) == VOIDmode));
3748 /* If op0 is a compare, extract the comparison arguments from it. */
3749 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3751 op1 = XEXP (op0, 1);
3752 op0 = XEXP (op0, 0);
3754 if (GET_MODE (op0) != VOIDmode)
3755 mode = GET_MODE (op0);
3756 else if (GET_MODE (op1) != VOIDmode)
3757 mode = GET_MODE (op1);
3758 else
3759 return 0;
3762 /* We can't simplify MODE_CC values since we don't know what the
3763 actual comparison is. */
3764 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3765 return 0;
3767 /* Make sure the constant is second. */
3768 if (swap_commutative_operands_p (op0, op1))
3770 tem = op0, op0 = op1, op1 = tem;
3771 code = swap_condition (code);
3774 trueop0 = avoid_constant_pool_reference (op0);
3775 trueop1 = avoid_constant_pool_reference (op1);
3777 /* For integer comparisons of A and B maybe we can simplify A - B and can
3778 then simplify a comparison of that with zero. If A and B are both either
3779 a register or a CONST_INT, this can't help; testing for these cases will
3780 prevent infinite recursion here and speed things up.
3782 We can only do this for EQ and NE comparisons as otherwise we may
3783 lose or introduce overflow, which we cannot disregard as undefined, since
3784 we do not know the signedness of the operation on either the left or
3785 the right hand side of the comparison. */
3787 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3788 && (code == EQ || code == NE)
3789 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3790 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3791 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3792 /* We cannot do this if tem is a nonzero address. */
3793 && ! nonzero_address_p (tem))
3794 return simplify_const_relational_operation (signed_condition (code),
3795 mode, tem, const0_rtx);
3797 if (! HONOR_NANS (mode) && code == ORDERED)
3798 return const_true_rtx;
3800 if (! HONOR_NANS (mode) && code == UNORDERED)
3801 return const0_rtx;
3803 /* For modes without NaNs, if the two operands are equal, we know the
3804 result except if they have side-effects. */
3805 if (! HONOR_NANS (GET_MODE (trueop0))
3806 && rtx_equal_p (trueop0, trueop1)
3807 && ! side_effects_p (trueop0))
3808 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3810 /* If the operands are floating-point constants, see if we can fold
3811 the result. */
3812 else if (GET_CODE (trueop0) == CONST_DOUBLE
3813 && GET_CODE (trueop1) == CONST_DOUBLE
3814 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3816 REAL_VALUE_TYPE d0, d1;
3818 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3819 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3821 /* Comparisons are unordered iff at least one of the values is NaN. */
3822 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3823 switch (code)
3825 case UNEQ:
3826 case UNLT:
3827 case UNGT:
3828 case UNLE:
3829 case UNGE:
3830 case NE:
3831 case UNORDERED:
3832 return const_true_rtx;
3833 case EQ:
3834 case LT:
3835 case GT:
3836 case LE:
3837 case GE:
3838 case LTGT:
3839 case ORDERED:
3840 return const0_rtx;
3841 default:
3842 return 0;
3845 equal = REAL_VALUES_EQUAL (d0, d1);
3846 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3847 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3850 /* Otherwise, see if the operands are both integers. */
3851 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3852 && (GET_CODE (trueop0) == CONST_DOUBLE
3853 || GET_CODE (trueop0) == CONST_INT)
3854 && (GET_CODE (trueop1) == CONST_DOUBLE
3855 || GET_CODE (trueop1) == CONST_INT))
3857 int width = GET_MODE_BITSIZE (mode);
3858 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3859 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3861 /* Get the two words comprising each integer constant. */
3862 if (GET_CODE (trueop0) == CONST_DOUBLE)
3864 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3865 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3867 else
3869 l0u = l0s = INTVAL (trueop0);
3870 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3873 if (GET_CODE (trueop1) == CONST_DOUBLE)
3875 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3876 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3878 else
3880 l1u = l1s = INTVAL (trueop1);
3881 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3884 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3885 we have to sign or zero-extend the values. */
3886 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3888 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3889 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3891 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3892 l0s |= ((HOST_WIDE_INT) (-1) << width);
3894 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3895 l1s |= ((HOST_WIDE_INT) (-1) << width);
3897 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3898 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3900 equal = (h0u == h1u && l0u == l1u);
3901 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3902 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3903 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3904 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
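/* For example, comparing (const_int -1) with (const_int 1) in SImode sets
   op0lt (signed -1 < 1) but op1ltu (unsigned 0xffffffff > 1), so LT folds
   to const_true_rtx below while LTU folds to const0_rtx.
   (Illustrative constants.)  */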
3907 /* Otherwise, there are some code-specific tests we can make. */
3908 else
3910 /* Optimize comparisons with upper and lower bounds. */
3911 if (SCALAR_INT_MODE_P (mode)
3912 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3914 rtx mmin, mmax;
3915 int sign;
3917 if (code == GEU
3918 || code == LEU
3919 || code == GTU
3920 || code == LTU)
3921 sign = 0;
3922 else
3923 sign = 1;
3925 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3927 tem = NULL_RTX;
3928 switch (code)
3930 case GEU:
3931 case GE:
3932 /* x >= min is always true. */
3933 if (rtx_equal_p (trueop1, mmin))
3934 tem = const_true_rtx;
3935 else
3936 break;
3938 case LEU:
3939 case LE:
3940 /* x <= max is always true. */
3941 if (rtx_equal_p (trueop1, mmax))
3942 tem = const_true_rtx;
3943 break;
3945 case GTU:
3946 case GT:
3947 /* x > max is always false. */
3948 if (rtx_equal_p (trueop1, mmax))
3949 tem = const0_rtx;
3950 break;
3952 case LTU:
3953 case LT:
3954 /* x < min is always false. */
3955 if (rtx_equal_p (trueop1, mmin))
3956 tem = const0_rtx;
3957 break;
3959 default:
3960 break;
3962 if (tem == const0_rtx
3963 || tem == const_true_rtx)
3964 return tem;
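/* For example, in an unsigned comparison mmin is zero, so
   (geu:SI (reg:SI 60) (const_int 0)) folds to const_true_rtx and
   (ltu:SI (reg:SI 60) (const_int 0)) folds to const0_rtx.
   (Illustrative; the register number is arbitrary.)  */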
3967 switch (code)
3969 case EQ:
3970 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3971 return const0_rtx;
3972 break;
3974 case NE:
3975 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3976 return const_true_rtx;
3977 break;
3979 case LT:
3980 /* Optimize abs(x) < 0.0. */
3981 if (trueop1 == CONST0_RTX (mode)
3982 && !HONOR_SNANS (mode)
3983 && (!INTEGRAL_MODE_P (mode)
3984 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3986 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3987 : trueop0;
3988 if (GET_CODE (tem) == ABS)
3989 return const0_rtx;
3991 break;
3993 case GE:
3994 /* Optimize abs(x) >= 0.0. */
3995 if (trueop1 == CONST0_RTX (mode)
3996 && !HONOR_NANS (mode)
3997 && (!INTEGRAL_MODE_P (mode)
3998 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4000 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4001 : trueop0;
4002 if (GET_CODE (tem) == ABS)
4003 return const_true_rtx;
4005 break;
4007 case UNGE:
4008 /* Optimize ! (abs(x) < 0.0). */
4009 if (trueop1 == CONST0_RTX (mode))
4011 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4012 : trueop0;
4013 if (GET_CODE (tem) == ABS)
4014 return const_true_rtx;
4016 break;
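/* For example, when NaNs need not be honored (e.g. with
   -ffinite-math-only), (ge (abs:DF (reg:DF 60)) (const_double:DF 0.0))
   folds to const_true_rtx, and when signaling NaNs are not honored
   (lt (abs:DF (reg:DF 60)) (const_double:DF 0.0)) folds to const0_rtx.
   (Illustrative; the register number is arbitrary.)  */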
4018 default:
4019 break;
4022 return 0;
4025 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4026 as appropriate. */
4027 switch (code)
4029 case EQ:
4030 case UNEQ:
4031 return equal ? const_true_rtx : const0_rtx;
4032 case NE:
4033 case LTGT:
4034 return ! equal ? const_true_rtx : const0_rtx;
4035 case LT:
4036 case UNLT:
4037 return op0lt ? const_true_rtx : const0_rtx;
4038 case GT:
4039 case UNGT:
4040 return op1lt ? const_true_rtx : const0_rtx;
4041 case LTU:
4042 return op0ltu ? const_true_rtx : const0_rtx;
4043 case GTU:
4044 return op1ltu ? const_true_rtx : const0_rtx;
4045 case LE:
4046 case UNLE:
4047 return equal || op0lt ? const_true_rtx : const0_rtx;
4048 case GE:
4049 case UNGE:
4050 return equal || op1lt ? const_true_rtx : const0_rtx;
4051 case LEU:
4052 return equal || op0ltu ? const_true_rtx : const0_rtx;
4053 case GEU:
4054 return equal || op1ltu ? const_true_rtx : const0_rtx;
4055 case ORDERED:
4056 return const_true_rtx;
4057 case UNORDERED:
4058 return const0_rtx;
4059 default:
4060 gcc_unreachable ();
4064 /* Simplify CODE, an operation with result mode MODE and three operands,
4065 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4066 a constant. Return 0 if no simplification is possible. */
4069 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4070 enum machine_mode op0_mode, rtx op0, rtx op1,
4071 rtx op2)
4073 unsigned int width = GET_MODE_BITSIZE (mode);
4075 /* VOIDmode means "infinite" precision. */
4076 if (width == 0)
4077 width = HOST_BITS_PER_WIDE_INT;
4079 switch (code)
4081 case SIGN_EXTRACT:
4082 case ZERO_EXTRACT:
4083 if (GET_CODE (op0) == CONST_INT
4084 && GET_CODE (op1) == CONST_INT
4085 && GET_CODE (op2) == CONST_INT
4086 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4087 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4089 /* Extracting a bit-field from a constant */
4090 HOST_WIDE_INT val = INTVAL (op0);
4092 if (BITS_BIG_ENDIAN)
4093 val >>= (GET_MODE_BITSIZE (op0_mode)
4094 - INTVAL (op2) - INTVAL (op1));
4095 else
4096 val >>= INTVAL (op2);
4098 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4100 /* First zero-extend. */
4101 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4102 /* If desired, propagate sign bit. */
4103 if (code == SIGN_EXTRACT
4104 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4105 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4108 /* Clear the bits that don't belong in our mode,
4109 unless they and our sign bit are all one.
4110 So we get either a reasonable negative value or a reasonable
4111 unsigned value for this mode. */
4112 if (width < HOST_BITS_PER_WIDE_INT
4113 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4114 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4115 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4117 return gen_int_mode (val, mode);
4119 break;
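/* For example, assuming BITS_BIG_ENDIAN is 0,
   (zero_extract:SI (const_int 0x5c) (const_int 4) (const_int 2)) shifts the
   constant right by 2 and masks it to 4 bits, yielding (const_int 7).
   (Illustrative constants.)  */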
4121 case IF_THEN_ELSE:
4122 if (GET_CODE (op0) == CONST_INT)
4123 return op0 != const0_rtx ? op1 : op2;
4125 /* Convert c ? a : a into "a". */
4126 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4127 return op1;
4129 /* Convert a != b ? a : b into "a". */
4130 if (GET_CODE (op0) == NE
4131 && ! side_effects_p (op0)
4132 && ! HONOR_NANS (mode)
4133 && ! HONOR_SIGNED_ZEROS (mode)
4134 && ((rtx_equal_p (XEXP (op0, 0), op1)
4135 && rtx_equal_p (XEXP (op0, 1), op2))
4136 || (rtx_equal_p (XEXP (op0, 0), op2)
4137 && rtx_equal_p (XEXP (op0, 1), op1))))
4138 return op1;
4140 /* Convert a == b ? a : b into "b". */
4141 if (GET_CODE (op0) == EQ
4142 && ! side_effects_p (op0)
4143 && ! HONOR_NANS (mode)
4144 && ! HONOR_SIGNED_ZEROS (mode)
4145 && ((rtx_equal_p (XEXP (op0, 0), op1)
4146 && rtx_equal_p (XEXP (op0, 1), op2))
4147 || (rtx_equal_p (XEXP (op0, 0), op2)
4148 && rtx_equal_p (XEXP (op0, 1), op1))))
4149 return op2;
4151 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4153 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4154 ? GET_MODE (XEXP (op0, 1))
4155 : GET_MODE (XEXP (op0, 0)));
4156 rtx temp;
4158 /* Look for happy constants in op1 and op2. */
4159 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4161 HOST_WIDE_INT t = INTVAL (op1);
4162 HOST_WIDE_INT f = INTVAL (op2);
4164 if (t == STORE_FLAG_VALUE && f == 0)
4165 code = GET_CODE (op0);
4166 else if (t == 0 && f == STORE_FLAG_VALUE)
4168 enum rtx_code tmp;
4169 tmp = reversed_comparison_code (op0, NULL_RTX);
4170 if (tmp == UNKNOWN)
4171 break;
4172 code = tmp;
4174 else
4175 break;
4177 return simplify_gen_relational (code, mode, cmp_mode,
4178 XEXP (op0, 0), XEXP (op0, 1));
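/* For example, on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes just
   (lt x y), and with the constant arms swapped it becomes the reversed
   comparison (ge x y) when that reversal is known to be safe.
   (Illustrative; x and y stand for arbitrary operands.)  */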
4181 if (cmp_mode == VOIDmode)
4182 cmp_mode = op0_mode;
4183 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4184 cmp_mode, XEXP (op0, 0),
4185 XEXP (op0, 1));
4187 /* See if any simplifications were possible. */
4188 if (temp)
4190 if (GET_CODE (temp) == CONST_INT)
4191 return temp == const0_rtx ? op2 : op1;
4192 else if (temp)
4193 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4196 break;
4198 case VEC_MERGE:
4199 gcc_assert (GET_MODE (op0) == mode);
4200 gcc_assert (GET_MODE (op1) == mode);
4201 gcc_assert (VECTOR_MODE_P (mode));
4202 op2 = avoid_constant_pool_reference (op2);
4203 if (GET_CODE (op2) == CONST_INT)
4205 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4206 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4207 int mask = (1 << n_elts) - 1;
4209 if (!(INTVAL (op2) & mask))
4210 return op1;
4211 if ((INTVAL (op2) & mask) == mask)
4212 return op0;
4214 op0 = avoid_constant_pool_reference (op0);
4215 op1 = avoid_constant_pool_reference (op1);
4216 if (GET_CODE (op0) == CONST_VECTOR
4217 && GET_CODE (op1) == CONST_VECTOR)
4219 rtvec v = rtvec_alloc (n_elts);
4220 unsigned int i;
4222 for (i = 0; i < n_elts; i++)
4223 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4224 ? CONST_VECTOR_ELT (op0, i)
4225 : CONST_VECTOR_ELT (op1, i));
4226 return gen_rtx_CONST_VECTOR (mode, v);
4229 break;
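/* For example, (vec_merge:V4SI (const_vector [a0 a1 a2 a3])
   (const_vector [b0 b1 b2 b3]) (const_int 5)) selects a0 and a2 (mask
   bits 0 and 2) from the first operand and b1 and b3 from the second,
   while masks of 0 or 0xf return the second or first operand unchanged.
   (Illustrative element names.)  */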
4231 default:
4232 gcc_unreachable ();
4235 return 0;
4238 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4239 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4241 Works by unpacking OP into a collection of 8-bit values
4242 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4243 and then repacking them again for OUTERMODE. */
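/* For example, on a little-endian target (subreg:QI (const_int 0x1234) 0)
   evaluates to (const_int 0x34), while byte offset 1 selects the high byte
   and yields (const_int 0x12); on a big-endian target the two offsets are
   swapped.  (Illustrative constants, assuming a HImode source.)  */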
4245 static rtx
4246 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4247 enum machine_mode innermode, unsigned int byte)
4249 /* We support up to 512-bit values (for V8DFmode). */
4250 enum {
4251 max_bitsize = 512,
4252 value_bit = 8,
4253 value_mask = (1 << value_bit) - 1
4255 unsigned char value[max_bitsize / value_bit];
4256 int value_start;
4257 int i;
4258 int elem;
4260 int num_elem;
4261 rtx * elems;
4262 int elem_bitsize;
4263 rtx result_s;
4264 rtvec result_v = NULL;
4265 enum mode_class outer_class;
4266 enum machine_mode outer_submode;
4268 /* Some ports misuse CCmode. */
4269 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4270 return op;
4272 /* We have no way to represent a complex constant at the rtl level. */
4273 if (COMPLEX_MODE_P (outermode))
4274 return NULL_RTX;
4276 /* Unpack the value. */
4278 if (GET_CODE (op) == CONST_VECTOR)
4280 num_elem = CONST_VECTOR_NUNITS (op);
4281 elems = &CONST_VECTOR_ELT (op, 0);
4282 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4284 else
4286 num_elem = 1;
4287 elems = &op;
4288 elem_bitsize = max_bitsize;
4290 /* If this asserts, it is too complicated; reducing value_bit may help. */
4291 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4292 /* I don't know how to handle endianness of sub-units. */
4293 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4295 for (elem = 0; elem < num_elem; elem++)
4297 unsigned char * vp;
4298 rtx el = elems[elem];
4300 /* Vectors are kept in target memory order. (This is probably
4301 a mistake.) */
4303 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4304 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4305 / BITS_PER_UNIT);
4306 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4307 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4308 unsigned bytele = (subword_byte % UNITS_PER_WORD
4309 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4310 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4313 switch (GET_CODE (el))
4315 case CONST_INT:
4316 for (i = 0;
4317 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4318 i += value_bit)
4319 *vp++ = INTVAL (el) >> i;
4320 /* CONST_INTs are always logically sign-extended. */
4321 for (; i < elem_bitsize; i += value_bit)
4322 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4323 break;
4325 case CONST_DOUBLE:
4326 if (GET_MODE (el) == VOIDmode)
4328 /* If this triggers, someone should have generated a
4329 CONST_INT instead. */
4330 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4332 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4333 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4334 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4336 *vp++
4337 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4338 i += value_bit;
4340 /* It shouldn't matter what's done here, so fill it with
4341 zero. */
4342 for (; i < elem_bitsize; i += value_bit)
4343 *vp++ = 0;
4345 else
4347 long tmp[max_bitsize / 32];
4348 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4350 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4351 gcc_assert (bitsize <= elem_bitsize);
4352 gcc_assert (bitsize % value_bit == 0);
4354 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4355 GET_MODE (el));
4357 /* real_to_target produces its result in words affected by
4358 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4359 and use WORDS_BIG_ENDIAN instead; see the documentation
4360 of SUBREG in rtl.texi. */
4361 for (i = 0; i < bitsize; i += value_bit)
4363 int ibase;
4364 if (WORDS_BIG_ENDIAN)
4365 ibase = bitsize - 1 - i;
4366 else
4367 ibase = i;
4368 *vp++ = tmp[ibase / 32] >> i % 32;
4371 /* It shouldn't matter what's done here, so fill it with
4372 zero. */
4373 for (; i < elem_bitsize; i += value_bit)
4374 *vp++ = 0;
4376 break;
4378 default:
4379 gcc_unreachable ();
4383 /* Now, pick the right byte to start with. */
4384 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4385 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4386 will already have offset 0. */
4387 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4389 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4390 - byte);
4391 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4392 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4393 byte = (subword_byte % UNITS_PER_WORD
4394 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4397 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4398 so if it's become negative it will instead be very large.) */
4399 gcc_assert (byte < GET_MODE_SIZE (innermode));
4401 /* Convert from bytes to chunks of size value_bit. */
4402 value_start = byte * (BITS_PER_UNIT / value_bit);
4404 /* Re-pack the value. */
4406 if (VECTOR_MODE_P (outermode))
4408 num_elem = GET_MODE_NUNITS (outermode);
4409 result_v = rtvec_alloc (num_elem);
4410 elems = &RTVEC_ELT (result_v, 0);
4411 outer_submode = GET_MODE_INNER (outermode);
4413 else
4415 num_elem = 1;
4416 elems = &result_s;
4417 outer_submode = outermode;
4420 outer_class = GET_MODE_CLASS (outer_submode);
4421 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4423 gcc_assert (elem_bitsize % value_bit == 0);
4424 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4426 for (elem = 0; elem < num_elem; elem++)
4428 unsigned char *vp;
4430 /* Vectors are stored in target memory order. (This is probably
4431 a mistake.) */
4433 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4434 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4435 / BITS_PER_UNIT);
4436 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4437 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4438 unsigned bytele = (subword_byte % UNITS_PER_WORD
4439 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4440 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4443 switch (outer_class)
4445 case MODE_INT:
4446 case MODE_PARTIAL_INT:
4448 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4450 for (i = 0;
4451 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4452 i += value_bit)
4453 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4454 for (; i < elem_bitsize; i += value_bit)
4455 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4456 << (i - HOST_BITS_PER_WIDE_INT));
4458 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4459 know why. */
4460 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4461 elems[elem] = gen_int_mode (lo, outer_submode);
4462 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4463 elems[elem] = immed_double_const (lo, hi, outer_submode);
4464 else
4465 return NULL_RTX;
4467 break;
4469 case MODE_FLOAT:
4470 case MODE_DECIMAL_FLOAT:
4472 REAL_VALUE_TYPE r;
4473 long tmp[max_bitsize / 32];
4475 /* real_from_target wants its input in words affected by
4476 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4477 and use WORDS_BIG_ENDIAN instead; see the documentation
4478 of SUBREG in rtl.texi. */
4479 for (i = 0; i < max_bitsize / 32; i++)
4480 tmp[i] = 0;
4481 for (i = 0; i < elem_bitsize; i += value_bit)
4483 int ibase;
4484 if (WORDS_BIG_ENDIAN)
4485 ibase = elem_bitsize - 1 - i;
4486 else
4487 ibase = i;
4488 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4491 real_from_target (&r, tmp, outer_submode);
4492 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4494 break;
4496 default:
4497 gcc_unreachable ();
4500 if (VECTOR_MODE_P (outermode))
4501 return gen_rtx_CONST_VECTOR (outermode, result_v);
4502 else
4503 return result_s;
4506 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4507 Return 0 if no simplifications are possible. */
4509 simplify_subreg (enum machine_mode outermode, rtx op,
4510 enum machine_mode innermode, unsigned int byte)
4512 /* Little bit of sanity checking. */
4513 gcc_assert (innermode != VOIDmode);
4514 gcc_assert (outermode != VOIDmode);
4515 gcc_assert (innermode != BLKmode);
4516 gcc_assert (outermode != BLKmode);
4518 gcc_assert (GET_MODE (op) == innermode
4519 || GET_MODE (op) == VOIDmode);
4521 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4522 gcc_assert (byte < GET_MODE_SIZE (innermode));
4524 if (outermode == innermode && !byte)
4525 return op;
4527 if (GET_CODE (op) == CONST_INT
4528 || GET_CODE (op) == CONST_DOUBLE
4529 || GET_CODE (op) == CONST_VECTOR)
4530 return simplify_immed_subreg (outermode, op, innermode, byte);
4532 /* Changing mode twice with SUBREG => just change it once,
4533 or not at all if changing back to op's starting mode. */
4534 if (GET_CODE (op) == SUBREG)
4536 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4537 int final_offset = byte + SUBREG_BYTE (op);
4538 rtx newx;
4540 if (outermode == innermostmode
4541 && byte == 0 && SUBREG_BYTE (op) == 0)
4542 return SUBREG_REG (op);
4544 /* The SUBREG_BYTE represents the offset, as if the value were stored
4545 in memory. An irritating exception is a paradoxical subreg, where
4546 we define SUBREG_BYTE to be 0. On big-endian machines, this
4547 value should be negative. For a moment, undo this exception. */
4548 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4550 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4551 if (WORDS_BIG_ENDIAN)
4552 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4553 if (BYTES_BIG_ENDIAN)
4554 final_offset += difference % UNITS_PER_WORD;
4556 if (SUBREG_BYTE (op) == 0
4557 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4559 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4560 if (WORDS_BIG_ENDIAN)
4561 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4562 if (BYTES_BIG_ENDIAN)
4563 final_offset += difference % UNITS_PER_WORD;
4566 /* See whether resulting subreg will be paradoxical. */
4567 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4569 /* In nonparadoxical subregs we can't handle negative offsets. */
4570 if (final_offset < 0)
4571 return NULL_RTX;
4572 /* Bail out in case resulting subreg would be incorrect. */
4573 if (final_offset % GET_MODE_SIZE (outermode)
4574 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4575 return NULL_RTX;
4577 else
4579 int offset = 0;
4580 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4582 /* In a paradoxical subreg, see if we are still looking at the lower part.
4583 If so, our SUBREG_BYTE will be 0. */
4584 if (WORDS_BIG_ENDIAN)
4585 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4586 if (BYTES_BIG_ENDIAN)
4587 offset += difference % UNITS_PER_WORD;
4588 if (offset == final_offset)
4589 final_offset = 0;
4590 else
4591 return NULL_RTX;
4594 /* Recurse for further possible simplifications. */
4595 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4596 final_offset);
4597 if (newx)
4598 return newx;
4599 if (validate_subreg (outermode, innermostmode,
4600 SUBREG_REG (op), final_offset))
4601 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4602 return NULL_RTX;
4605 /* Merge implicit and explicit truncations. */
4607 if (GET_CODE (op) == TRUNCATE
4608 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4609 && subreg_lowpart_offset (outermode, innermode) == byte)
4610 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4611 GET_MODE (XEXP (op, 0)));
4613 /* SUBREG of a hard register => just change the register number
4614 and/or mode. If the hard register is not valid in that mode,
4615 suppress this simplification. If the hard register is the stack,
4616 frame, or argument pointer, leave this as a SUBREG. */
4618 if (REG_P (op)
4619 && REGNO (op) < FIRST_PSEUDO_REGISTER
4620 #ifdef CANNOT_CHANGE_MODE_CLASS
4621 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4622 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4623 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4624 #endif
4625 && ((reload_completed && !frame_pointer_needed)
4626 || (REGNO (op) != FRAME_POINTER_REGNUM
4627 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4628 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4629 #endif
4631 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4632 && REGNO (op) != ARG_POINTER_REGNUM
4633 #endif
4634 && REGNO (op) != STACK_POINTER_REGNUM
4635 && subreg_offset_representable_p (REGNO (op), innermode,
4636 byte, outermode))
4638 unsigned int regno = REGNO (op);
4639 unsigned int final_regno
4640 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4642 /* ??? We do allow it if the current REG is not valid for
4643 its mode. This is a kludge to work around how float/complex
4644 arguments are passed on 32-bit SPARC and should be fixed. */
4645 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4646 || ! HARD_REGNO_MODE_OK (regno, innermode))
4648 rtx x;
4649 int final_offset = byte;
4651 /* Adjust offset for paradoxical subregs. */
4652 if (byte == 0
4653 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4655 int difference = (GET_MODE_SIZE (innermode)
4656 - GET_MODE_SIZE (outermode));
4657 if (WORDS_BIG_ENDIAN)
4658 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4659 if (BYTES_BIG_ENDIAN)
4660 final_offset += difference % UNITS_PER_WORD;
4663 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4665 /* Propagate the original regno. We don't have any way to specify
4666 the offset inside the original regno, so do so only for the lowpart.
4667 The information is used only by alias analysis, which cannot
4668 grok partial registers anyway. */
4670 if (subreg_lowpart_offset (outermode, innermode) == byte)
4671 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4672 return x;
4676 /* If we have a SUBREG of a register that we are replacing and we are
4677 replacing it with a MEM, make a new MEM and try replacing the
4678 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4679 or if we would be widening it. */
4681 if (MEM_P (op)
4682 && ! mode_dependent_address_p (XEXP (op, 0))
4683 /* Allow splitting of volatile memory references in case we don't
4684 have an instruction to move the whole thing. */
4685 && (! MEM_VOLATILE_P (op)
4686 || ! have_insn_for (SET, innermode))
4687 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4688 return adjust_address_nv (op, outermode, byte);
4690 /* Handle complex values represented as CONCAT
4691 of real and imaginary part. */
4692 if (GET_CODE (op) == CONCAT)
4694 unsigned int part_size, final_offset;
4695 rtx part, res;
4697 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4698 if (byte < part_size)
4700 part = XEXP (op, 0);
4701 final_offset = byte;
4703 else
4705 part = XEXP (op, 1);
4706 final_offset = byte - part_size;
4709 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4710 return NULL_RTX;
4712 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4713 if (res)
4714 return res;
4715 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4716 return gen_rtx_SUBREG (outermode, part, final_offset);
4717 return NULL_RTX;
4720 /* Optimize SUBREG truncations of zero and sign extended values. */
4721 if ((GET_CODE (op) == ZERO_EXTEND
4722 || GET_CODE (op) == SIGN_EXTEND)
4723 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4725 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4727 /* If we're requesting the lowpart of a zero or sign extension,
4728 there are three possibilities. If the outermode is the same
4729 as the origmode, we can omit both the extension and the subreg.
4730 If the outermode is not larger than the origmode, we can apply
4731 the truncation without the extension. Finally, if the outermode
4732 is larger than the origmode, but both are integer modes, we
4733 can just extend to the appropriate mode. */
4734 if (bitpos == 0)
4736 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4737 if (outermode == origmode)
4738 return XEXP (op, 0);
4739 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4740 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4741 subreg_lowpart_offset (outermode,
4742 origmode));
4743 if (SCALAR_INT_MODE_P (outermode))
4744 return simplify_gen_unary (GET_CODE (op), outermode,
4745 XEXP (op, 0), origmode);
4748 /* A SUBREG resulting from a zero extension may fold to zero if
4749 it extracts bits higher than the ZERO_EXTEND's source bits. */
4750 if (GET_CODE (op) == ZERO_EXTEND
4751 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4752 return CONST0_RTX (outermode);
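/* For example, (subreg:QI (zero_extend:SI (reg:QI 60)) 0) is just
   (reg:QI 60), while a subreg that selects only zero-extended high bits,
   such as (subreg:HI (zero_extend:SI (reg:QI 60)) 2) on a little-endian
   target, folds to (const_int 0).  (Illustrative; the register number is
   arbitrary.)  */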
4755 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4756 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4757 the outer subreg is effectively a truncation to the original mode. */
4758 if ((GET_CODE (op) == LSHIFTRT
4759 || GET_CODE (op) == ASHIFTRT)
4760 && SCALAR_INT_MODE_P (outermode)
4761 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
4762 to avoid the possibility that an outer LSHIFTRT shifts by more
4763 than the sign extension's sign_bit_copies and introduces zeros
4764 into the high bits of the result. */
4765 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4766 && GET_CODE (XEXP (op, 1)) == CONST_INT
4767 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4768 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4769 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4770 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4771 return simplify_gen_binary (ASHIFTRT, outermode,
4772 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4774 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4775 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4776 the outer subreg is effectively a truncation to the original mode. */
4777 if ((GET_CODE (op) == LSHIFTRT
4778 || GET_CODE (op) == ASHIFTRT)
4779 && SCALAR_INT_MODE_P (outermode)
4780 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4781 && GET_CODE (XEXP (op, 1)) == CONST_INT
4782 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4783 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4784 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4785 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4786 return simplify_gen_binary (LSHIFTRT, outermode,
4787 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4789 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4790 (ashift:QI (x:QI) C), where C is a suitable small constant and
4791 the outer subreg is effectively a truncation to the original mode. */
4792 if (GET_CODE (op) == ASHIFT
4793 && SCALAR_INT_MODE_P (outermode)
4794 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4795 && GET_CODE (XEXP (op, 1)) == CONST_INT
4796 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4797 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4798 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4799 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4800 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4801 return simplify_gen_binary (ASHIFT, outermode,
4802 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4804 return NULL_RTX;
4807 /* Make a SUBREG operation or equivalent if it folds. */
4810 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4811 enum machine_mode innermode, unsigned int byte)
4813 rtx newx;
4815 newx = simplify_subreg (outermode, op, innermode, byte);
4816 if (newx)
4817 return newx;
4819 if (GET_CODE (op) == SUBREG
4820 || GET_CODE (op) == CONCAT
4821 || GET_MODE (op) == VOIDmode)
4822 return NULL_RTX;
4824 if (validate_subreg (outermode, innermode, op, byte))
4825 return gen_rtx_SUBREG (outermode, op, byte);
4827 return NULL_RTX;
4830 /* Simplify X, an rtx expression.
4832 Return the simplified expression or NULL if no simplifications
4833 were possible.
4835 This is the preferred entry point into the simplification routines;
4836 however, we still allow passes to call the more specific routines.
4838 Right now GCC has three (yes, three) major bodies of RTL simplification
4839 code that need to be unified.
4841 1. fold_rtx in cse.c. This code uses various CSE specific
4842 information to aid in RTL simplification.
4844 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4845 it uses combine specific information to aid in RTL
4846 simplification.
4848 3. The routines in this file.
4851 Long term we want to only have one body of simplification code; to
4852 get to that state I recommend the following steps:
4854 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4855 which do not depend on pass-specific state into these routines.
4857 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4858 use this routine whenever possible.
4860 3. Allow for pass dependent state to be provided to these
4861 routines and add simplifications based on the pass dependent
4862 state. Remove code from cse.c & combine.c that becomes
4863 redundant/dead.
4865 It will take time, but ultimately the compiler will be easier to
4866 maintain and improve. It's totally silly that when we add a
4867 simplification it needs to be added to 4 places (3 for RTL
4868 simplification and 1 for tree simplification). */
4871 simplify_rtx (rtx x)
4873 enum rtx_code code = GET_CODE (x);
4874 enum machine_mode mode = GET_MODE (x);
4876 switch (GET_RTX_CLASS (code))
4878 case RTX_UNARY:
4879 return simplify_unary_operation (code, mode,
4880 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4881 case RTX_COMM_ARITH:
4882 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4883 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4885 /* Fall through.... */
4887 case RTX_BIN_ARITH:
4888 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4890 case RTX_TERNARY:
4891 case RTX_BITFIELD_OPS:
4892 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4893 XEXP (x, 0), XEXP (x, 1),
4894 XEXP (x, 2));
4896 case RTX_COMPARE:
4897 case RTX_COMM_COMPARE:
4898 return simplify_relational_operation (code, mode,
4899 ((GET_MODE (XEXP (x, 0))
4900 != VOIDmode)
4901 ? GET_MODE (XEXP (x, 0))
4902 : GET_MODE (XEXP (x, 1))),
4903 XEXP (x, 0),
4904 XEXP (x, 1));
4906 case RTX_EXTRA:
4907 if (code == SUBREG)
4908 return simplify_subreg (mode, SUBREG_REG (x),
4909 GET_MODE (SUBREG_REG (x)),
4910 SUBREG_BYTE (x));
4911 break;
4913 case RTX_OBJ:
4914 if (code == LO_SUM)
4916 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4917 if (GET_CODE (XEXP (x, 0)) == HIGH
4918 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4919 return XEXP (x, 1);
4921 break;
4923 default:
4924 break;
4926 return NULL;
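/* Usage sketch (illustrative): a pass holding x = (plus:SI (const_int 2)
   (const_int 3)) can call simplify_rtx (x) and get back (const_int 5);
   a NULL return means no simplification was found and the caller should
   keep the original expression.  */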