gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
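/* Illustrative sketch (not part of the original file): with a 64-bit
   HOST_WIDE_INT, the macro above derives the high half of a (low, high)
   pair from the sign of the low half.  */
#if 0
HOST_WIDE_INT low = -5;
HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);                /* -1, low < 0.  */
HOST_WIDE_INT high2 = HWI_SIGN_EXTEND ((HOST_WIDE_INT) 7); /*  0.  */
#endif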
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
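/* Worked example (illustrative, assuming 32-bit SImode and a 64-bit
   HOST_WIDE_INT): a CONST_INT whose low 32 bits are 0x80000000 satisfies
   the predicate above, since masking VAL to 32 bits leaves exactly
   (unsigned HOST_WIDE_INT) 1 << 31; a CONST_INT of 0x40000000 does not.  */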
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
133 rtx
134 avoid_constant_pool_reference (rtx x)
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
142 case MEM:
143 break;
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
151 REAL_VALUE_TYPE d;
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
156 return x;
158 default:
159 return x;
162 if (GET_MODE (x) == BLKmode)
163 return x;
165 addr = XEXP (x, 0);
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
199 else
200 return c;
203 return x;
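/* Usage sketch (operands invented for illustration): if X is
   (mem:SF (symbol_ref ...)) pointing at a constant-pool entry that holds
   1.0, the function above returns the CONST_DOUBLE for 1.0, letting
   callers fold through the memory reference; anything it does not
   recognize comes back unchanged.  */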
206 /* Make a unary operation by first seeing if it folds and otherwise making
207 the specified operation. */
209 rtx
210 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
211 enum machine_mode op_mode)
213 rtx tem;
215 /* If this simplifies, use it. */
216 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
217 return tem;
219 return gen_rtx_fmt_e (code, mode, op);
222 /* Likewise for ternary operations. */
224 rtx
225 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
226 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
228 rtx tem;
230 /* If this simplifies, use it. */
231 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
232 op0, op1, op2)))
233 return tem;
235 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
238 /* Likewise, for relational operations.
239 CMP_MODE specifies mode comparison is done in. */
241 rtx
242 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
243 enum machine_mode cmp_mode, rtx op0, rtx op1)
245 rtx tem;
247 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
248 op0, op1)))
249 return tem;
251 return gen_rtx_fmt_ee (code, mode, op0, op1);
254 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
255 resulting RTX. Return a new RTX which is as simplified as possible. */
257 rtx
258 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
260 enum rtx_code code = GET_CODE (x);
261 enum machine_mode mode = GET_MODE (x);
262 enum machine_mode op_mode;
263 rtx op0, op1, op2;
265 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
266 to build a new expression substituting recursively. If we can't do
267 anything, return our input. */
269 if (x == old_rtx)
270 return new_rtx;
272 switch (GET_RTX_CLASS (code))
274 case RTX_UNARY:
275 op0 = XEXP (x, 0);
276 op_mode = GET_MODE (op0);
277 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
278 if (op0 == XEXP (x, 0))
279 return x;
280 return simplify_gen_unary (code, mode, op0, op_mode);
282 case RTX_BIN_ARITH:
283 case RTX_COMM_ARITH:
284 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_binary (code, mode, op0, op1);
290 case RTX_COMPARE:
291 case RTX_COMM_COMPARE:
292 op0 = XEXP (x, 0);
293 op1 = XEXP (x, 1);
294 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
295 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
296 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
298 return x;
299 return simplify_gen_relational (code, mode, op_mode, op0, op1);
301 case RTX_TERNARY:
302 case RTX_BITFIELD_OPS:
303 op0 = XEXP (x, 0);
304 op_mode = GET_MODE (op0);
305 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
306 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
307 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
308 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
309 return x;
310 if (op_mode == VOIDmode)
311 op_mode = GET_MODE (op0);
312 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
314 case RTX_EXTRA:
315 /* The only case we try to handle is a SUBREG. */
316 if (code == SUBREG)
318 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
319 if (op0 == SUBREG_REG (x))
320 return x;
321 op0 = simplify_gen_subreg (GET_MODE (x), op0,
322 GET_MODE (SUBREG_REG (x)),
323 SUBREG_BYTE (x));
324 return op0 ? op0 : x;
326 break;
328 case RTX_OBJ:
329 if (code == MEM)
331 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
332 if (op0 == XEXP (x, 0))
333 return x;
334 return replace_equiv_address_nv (x, op0);
336 else if (code == LO_SUM)
338 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
339 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
341 /* (lo_sum (high x) x) -> x */
342 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
343 return op1;
345 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
346 return x;
347 return gen_rtx_LO_SUM (mode, op0, op1);
349 else if (code == REG)
351 if (rtx_equal_p (x, old_rtx))
352 return new_rtx;
354 break;
356 default:
357 break;
359 return x;
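/* Illustrative use (operands invented): replacing (reg:SI 60) with
   (const_int 4) in (plus:SI (reg:SI 60) (const_int 3)) rebuilds the PLUS
   through simplify_gen_binary, which folds it to (const_int 7).  */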
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
365 rtx
366 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
367 rtx op, enum machine_mode op_mode)
369 rtx trueop, tem;
371 if (GET_CODE (op) == CONST)
372 op = XEXP (op, 0);
374 trueop = avoid_constant_pool_reference (op);
376 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
377 if (tem)
378 return tem;
380 return simplify_unary_operation_1 (code, mode, op);
383 /* Perform some simplifications we can do even if the operands
384 aren't constant. */
385 static rtx
386 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
388 enum rtx_code reversed;
389 rtx temp;
391 switch (code)
393 case NOT:
394 /* (not (not X)) == X. */
395 if (GET_CODE (op) == NOT)
396 return XEXP (op, 0);
398 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
399 comparison is all ones. */
400 if (COMPARISON_P (op)
401 && (mode == BImode || STORE_FLAG_VALUE == -1)
402 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
403 return simplify_gen_relational (reversed, mode, VOIDmode,
404 XEXP (op, 0), XEXP (op, 1));
406 /* (not (plus X -1)) can become (neg X). */
407 if (GET_CODE (op) == PLUS
408 && XEXP (op, 1) == constm1_rtx)
409 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
411 /* Similarly, (not (neg X)) is (plus X -1). */
412 if (GET_CODE (op) == NEG)
413 return plus_constant (XEXP (op, 0), -1);
415 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
416 if (GET_CODE (op) == XOR
417 && GET_CODE (XEXP (op, 1)) == CONST_INT
418 && (temp = simplify_unary_operation (NOT, mode,
419 XEXP (op, 1), mode)) != 0)
420 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
422 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
423 if (GET_CODE (op) == PLUS
424 && GET_CODE (XEXP (op, 1)) == CONST_INT
425 && mode_signbit_p (mode, XEXP (op, 1))
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
431 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
432 operands other than 1, but that is not valid. We could do a
433 similar simplification for (not (lshiftrt C X)) where C is
434 just the sign bit, but this doesn't seem common enough to
435 bother with. */
436 if (GET_CODE (op) == ASHIFT
437 && XEXP (op, 0) == const1_rtx)
439 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
440 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
443 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
444 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
445 so we can perform the above simplification. */
447 if (STORE_FLAG_VALUE == -1
448 && GET_CODE (op) == ASHIFTRT
449 && GET_CODE (XEXP (op, 1)) == CONST_INT
450 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
451 return simplify_gen_relational (GE, mode, VOIDmode,
452 XEXP (op, 0), const0_rtx);
455 if (GET_CODE (op) == SUBREG
456 && subreg_lowpart_p (op)
457 && (GET_MODE_SIZE (GET_MODE (op))
458 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
459 && GET_CODE (SUBREG_REG (op)) == ASHIFT
460 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
462 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
463 rtx x;
465 x = gen_rtx_ROTATE (inner_mode,
466 simplify_gen_unary (NOT, inner_mode, const1_rtx,
467 inner_mode),
468 XEXP (SUBREG_REG (op), 1));
469 return rtl_hooks.gen_lowpart_no_emit (mode, x);
472 /* Apply De Morgan's laws to reduce number of patterns for machines
473 with negating logical insns (and-not, nand, etc.). If result has
474 only one NOT, put it first, since that is how the patterns are
475 coded. */
477 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
479 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
480 enum machine_mode op_mode;
482 op_mode = GET_MODE (in1);
483 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
485 op_mode = GET_MODE (in2);
486 if (op_mode == VOIDmode)
487 op_mode = mode;
488 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
490 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
492 rtx tem = in2;
493 in2 = in1; in1 = tem;
496 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
497 mode, in1, in2);
499 break;
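/* The host-integer analogue of the De Morgan rewrite above is
   ~(a & b) == ~a | ~b and ~(a | b) == ~a & ~b (illustrative only):  */
#if 0
unsigned a = 0x0f, b = 0x3c;
/* Both sides evaluate to 0xfffffff3 for 32-bit unsigned.  */
unsigned lhs = ~(a & b), rhs = ~a | ~b;
#endif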
501 case NEG:
502 /* (neg (neg X)) == X. */
503 if (GET_CODE (op) == NEG)
504 return XEXP (op, 0);
506 /* (neg (plus X 1)) can become (not X). */
507 if (GET_CODE (op) == PLUS
508 && XEXP (op, 1) == const1_rtx)
509 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
511 /* Similarly, (neg (not X)) is (plus X 1). */
512 if (GET_CODE (op) == NOT)
513 return plus_constant (XEXP (op, 0), 1);
515 /* (neg (minus X Y)) can become (minus Y X). This transformation
516 isn't safe for modes with signed zeros, since if X and Y are
517 both +0, (minus Y X) is the same as (minus X Y). If the
518 rounding mode is towards +infinity (or -infinity) then the two
519 expressions will be rounded differently. */
520 if (GET_CODE (op) == MINUS
521 && !HONOR_SIGNED_ZEROS (mode)
522 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
523 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
525 if (GET_CODE (op) == PLUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
529 /* (neg (plus A C)) is simplified to (minus -C A). */
530 if (GET_CODE (XEXP (op, 1)) == CONST_INT
531 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
533 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
534 if (temp)
535 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
538 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
539 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
543 /* (neg (mult A B)) becomes (mult (neg A) B).
544 This works even for floating-point values. */
545 if (GET_CODE (op) == MULT
546 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
548 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
549 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
552 /* NEG commutes with ASHIFT since it is multiplication. Only do
553 this if we can then eliminate the NEG (e.g., if the operand
554 is a constant). */
555 if (GET_CODE (op) == ASHIFT)
557 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
558 if (temp)
559 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
562 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
563 C is equal to the width of MODE minus 1. */
564 if (GET_CODE (op) == ASHIFTRT
565 && GET_CODE (XEXP (op, 1)) == CONST_INT
566 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
567 return simplify_gen_binary (LSHIFTRT, mode,
568 XEXP (op, 0), XEXP (op, 1));
570 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == LSHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (ASHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
578 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
579 if (GET_CODE (op) == XOR
580 && XEXP (op, 1) == const1_rtx
581 && nonzero_bits (XEXP (op, 0), mode) == 1)
582 return plus_constant (XEXP (op, 0), -1);
584 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
585 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
586 if (GET_CODE (op) == LT
587 && XEXP (op, 1) == const0_rtx)
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
601 else if (STORE_FLAG_VALUE == -1)
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
612 break;
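/* Host-side analogue of the (neg (lt x 0)) rewrites above, assuming a
   32-bit int with arithmetic right shift (illustrative sketch only;
   right-shifting a negative int is implementation-defined in C):  */
#if 0
int as_neg (int x)   { return -(x < 0); }  /* STORE_FLAG_VALUE == 1 form.  */
int as_shift (int x) { return x >> 31; }   /* (ashiftrt x 31).  */
/* as_neg (-7) == -1 == as_shift (-7); as_neg (7) == 0 == as_shift (7).  */
#endif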
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
688 (float_truncate:DF (float_extend:XF foo:SF))
689 = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
733 /* (float_extend (float_extend x)) is (float_extend x)
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
737 */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
749 break;
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
776 break;
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
786 case POPCOUNT:
787 switch (GET_CODE (op))
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 default:
804 break;
806 break;
808 case PARITY:
809 switch (GET_CODE (op))
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
826 default:
827 break;
829 break;
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE (XEXP (op, 0)) == mode)
863 return XEXP (op, 0);
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE (XEXP (op, 0)) == mode)
885 return XEXP (op, 0);
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
899 default:
900 break;
903 return 0;
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
909 rtx
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
913 unsigned int width = GET_MODE_BITSIZE (mode);
915 if (code == VEC_DUPLICATE)
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
948 return gen_rtx_CONST_VECTOR (mode, v);
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
972 return gen_rtx_CONST_VECTOR (mode, v);
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1006 if (op_mode == VOIDmode)
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1014 ;
1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1029 switch (code)
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1035 case NEG:
1036 val = - arg0;
1037 break;
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1053 ;
1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1086 case BSWAP:
1088 unsigned int s;
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1099 break;
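/* Standalone rendering of the byte-reversal loop above (illustrative):  */
#if 0
unsigned int
bswap32 (unsigned int x)
{
  unsigned int val = 0, s;
  for (s = 0; s < 32; s += 8)
    val |= ((x >> s) & 0xff) << (32 - s - 8);
  return val;  /* bswap32 (0x11223344) == 0x44332211.  */
}
#endif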
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1136 val
1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1142 else
1143 return 0;
1144 break;
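/* Worked example (illustrative): sign-extending the QImode value 0xff
   with the code above gives val = 0xff; bit 0x80 is set, so val is
   reduced by 0x100, yielding -1 as expected.  */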
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 return 0;
1154 default:
1155 gcc_unreachable ();
1158 return gen_int_mode (val, mode);
1161 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1162 for a DImode operation on a CONST_INT. */
1163 else if (GET_MODE (op) == VOIDmode
1164 && width <= HOST_BITS_PER_WIDE_INT * 2
1165 && (GET_CODE (op) == CONST_DOUBLE
1166 || GET_CODE (op) == CONST_INT))
1168 unsigned HOST_WIDE_INT l1, lv;
1169 HOST_WIDE_INT h1, hv;
1171 if (GET_CODE (op) == CONST_DOUBLE)
1172 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1173 else
1174 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1176 switch (code)
1178 case NOT:
1179 lv = ~ l1;
1180 hv = ~ h1;
1181 break;
1183 case NEG:
1184 neg_double (l1, h1, &lv, &hv);
1185 break;
1187 case ABS:
1188 if (h1 < 0)
1189 neg_double (l1, h1, &lv, &hv);
1190 else
1191 lv = l1, hv = h1;
1192 break;
1194 case FFS:
1195 hv = 0;
1196 if (l1 == 0)
1198 if (h1 == 0)
1199 lv = 0;
1200 else
1201 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1203 else
1204 lv = exact_log2 (l1 & -l1) + 1;
1205 break;
1207 case CLZ:
1208 hv = 0;
1209 if (h1 != 0)
1210 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1211 - HOST_BITS_PER_WIDE_INT;
1212 else if (l1 != 0)
1213 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1214 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1215 lv = GET_MODE_BITSIZE (mode);
1216 break;
1218 case CTZ:
1219 hv = 0;
1220 if (l1 != 0)
1221 lv = exact_log2 (l1 & -l1);
1222 else if (h1 != 0)
1223 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1224 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1225 lv = GET_MODE_BITSIZE (mode);
1226 break;
1228 case POPCOUNT:
1229 hv = 0;
1230 lv = 0;
1231 while (l1)
1232 lv++, l1 &= l1 - 1;
1233 while (h1)
1234 lv++, h1 &= h1 - 1;
1235 break;
1237 case PARITY:
1238 hv = 0;
1239 lv = 0;
1240 while (l1)
1241 lv++, l1 &= l1 - 1;
1242 while (h1)
1243 lv++, h1 &= h1 - 1;
1244 lv &= 1;
1245 break;
1247 case BSWAP:
1249 unsigned int s;
1251 hv = 0;
1252 lv = 0;
1253 for (s = 0; s < width; s += 8)
1255 unsigned int d = width - s - 8;
1256 unsigned HOST_WIDE_INT byte;
1258 if (s < HOST_BITS_PER_WIDE_INT)
1259 byte = (l1 >> s) & 0xff;
1260 else
1261 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1263 if (d < HOST_BITS_PER_WIDE_INT)
1264 lv |= byte << d;
1265 else
1266 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1269 break;
1271 case TRUNCATE:
1272 /* This is just a change-of-mode, so do nothing. */
1273 lv = l1, hv = h1;
1274 break;
1276 case ZERO_EXTEND:
1277 gcc_assert (op_mode != VOIDmode);
1279 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1280 return 0;
1282 hv = 0;
1283 lv = l1 & GET_MODE_MASK (op_mode);
1284 break;
1286 case SIGN_EXTEND:
1287 if (op_mode == VOIDmode
1288 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1289 return 0;
1290 else
1292 lv = l1 & GET_MODE_MASK (op_mode);
1293 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1294 && (lv & ((HOST_WIDE_INT) 1
1295 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1296 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298 hv = HWI_SIGN_EXTEND (lv);
1300 break;
1302 case SQRT:
1303 return 0;
1305 default:
1306 return 0;
1309 return immed_double_const (lv, hv, mode);
1312 else if (GET_CODE (op) == CONST_DOUBLE
1313 && SCALAR_FLOAT_MODE_P (mode))
1315 REAL_VALUE_TYPE d, t;
1316 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1318 switch (code)
1320 case SQRT:
1321 if (HONOR_SNANS (mode) && real_isnan (&d))
1322 return 0;
1323 real_sqrt (&t, mode, &d);
1324 d = t;
1325 break;
1326 case ABS:
1327 d = REAL_VALUE_ABS (d);
1328 break;
1329 case NEG:
1330 d = REAL_VALUE_NEGATE (d);
1331 break;
1332 case FLOAT_TRUNCATE:
1333 d = real_value_truncate (mode, d);
1334 break;
1335 case FLOAT_EXTEND:
1336 /* All this does is change the mode. */
1337 break;
1338 case FIX:
1339 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1340 break;
1341 case NOT:
1343 long tmp[4];
1344 int i;
1346 real_to_target (tmp, &d, GET_MODE (op));
1347 for (i = 0; i < 4; i++)
1348 tmp[i] = ~tmp[i];
1349 real_from_target (&d, tmp, mode);
1350 break;
1352 default:
1353 gcc_unreachable ();
1355 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1358 else if (GET_CODE (op) == CONST_DOUBLE
1359 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1360 && GET_MODE_CLASS (mode) == MODE_INT
1361 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1363 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1364 operators are intentionally left unspecified (to ease implementation
1365 by target backends), for consistency, this routine implements the
1366 same semantics for constant folding as used by the middle-end. */
1368 /* This was formerly used only for non-IEEE float.
1369 eggert@twinsun.com says it is safe for IEEE also. */
1370 HOST_WIDE_INT xh, xl, th, tl;
1371 REAL_VALUE_TYPE x, t;
1372 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1373 switch (code)
1375 case FIX:
1376 if (REAL_VALUE_ISNAN (x))
1377 return const0_rtx;
1379 /* Test against the signed upper bound. */
1380 if (width > HOST_BITS_PER_WIDE_INT)
1382 th = ((unsigned HOST_WIDE_INT) 1
1383 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1384 tl = -1;
1386 else
1388 th = 0;
1389 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1391 real_from_integer (&t, VOIDmode, tl, th, 0);
1392 if (REAL_VALUES_LESS (t, x))
1394 xh = th;
1395 xl = tl;
1396 break;
1399 /* Test against the signed lower bound. */
1400 if (width > HOST_BITS_PER_WIDE_INT)
1402 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1403 tl = 0;
1405 else
1407 th = -1;
1408 tl = (HOST_WIDE_INT) -1 << (width - 1);
1410 real_from_integer (&t, VOIDmode, tl, th, 0);
1411 if (REAL_VALUES_LESS (x, t))
1413 xh = th;
1414 xl = tl;
1415 break;
1417 REAL_VALUE_TO_INT (&xl, &xh, x);
1418 break;
1420 case UNSIGNED_FIX:
1421 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1422 return const0_rtx;
1424 /* Test against the unsigned upper bound. */
1425 if (width == 2*HOST_BITS_PER_WIDE_INT)
1427 th = -1;
1428 tl = -1;
1430 else if (width >= HOST_BITS_PER_WIDE_INT)
1432 th = ((unsigned HOST_WIDE_INT) 1
1433 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1434 tl = -1;
1436 else
1438 th = 0;
1439 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1441 real_from_integer (&t, VOIDmode, tl, th, 1);
1442 if (REAL_VALUES_LESS (t, x))
1444 xh = th;
1445 xl = tl;
1446 break;
1449 REAL_VALUE_TO_INT (&xl, &xh, x);
1450 break;
1452 default:
1453 gcc_unreachable ();
1455 return immed_double_const (xl, xh, mode);
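/* Example of the saturation logic above (illustrative): for a 32-bit MODE
   on a 64-bit host, the signed upper bound is (th, tl) = (0, 0x7fffffff),
   so FIX of 4e9 folds to 0x7fffffff instead of wrapping, and a NaN input
   folds to 0.  */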
1458 return NULL_RTX;
1461 /* Subroutine of simplify_binary_operation to simplify a commutative,
1462 associative binary operation CODE with result mode MODE, operating
1463 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1464 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1465 canonicalization is possible. */
1467 static rtx
1468 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1469 rtx op0, rtx op1)
1471 rtx tem;
1473 /* Linearize the operator to the left. */
1474 if (GET_CODE (op1) == code)
1476 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1477 if (GET_CODE (op0) == code)
1479 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1480 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1483 /* "a op (b op c)" becomes "(b op c) op a". */
1484 if (! swap_commutative_operands_p (op1, op0))
1485 return simplify_gen_binary (code, mode, op1, op0);
1487 tem = op0;
1488 op0 = op1;
1489 op1 = tem;
1492 if (GET_CODE (op0) == code)
1494 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1495 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1497 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1498 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1501 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1502 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1503 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1504 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1505 if (tem != 0)
1506 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1508 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1509 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1510 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1511 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1512 if (tem != 0)
1513 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1516 return 0;
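/* Canonicalization example (illustrative): given (plus (plus x 3) y),
   the rules above rewrite it as (plus (plus x y) 3), keeping the constant
   outermost so that later folds can merge it with other constants.  */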
1520 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1521 and OP1. Return 0 if no simplification is possible.
1523 Don't use this for relational operations such as EQ or LT.
1524 Use simplify_relational_operation instead. */
1525 rtx
1526 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1527 rtx op0, rtx op1)
1529 rtx trueop0, trueop1;
1530 rtx tem;
1532 /* Relational operations don't work here. We must know the mode
1533 of the operands in order to do the comparison correctly.
1534 Assuming a full word can give incorrect results.
1535 Consider comparing 128 with -128 in QImode. */
1536 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1537 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1539 /* Make sure the constant is second. */
1540 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1541 && swap_commutative_operands_p (op0, op1))
1543 tem = op0, op0 = op1, op1 = tem;
1546 trueop0 = avoid_constant_pool_reference (op0);
1547 trueop1 = avoid_constant_pool_reference (op1);
1549 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1550 if (tem)
1551 return tem;
1552 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1555 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1556 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1557 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1558 actual constants. */
1560 static rtx
1561 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1562 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1564 rtx tem, reversed, opleft, opright;
1565 HOST_WIDE_INT val;
1566 unsigned int width = GET_MODE_BITSIZE (mode);
1568 /* Even if we can't compute a constant result,
1569 there are some cases worth simplifying. */
1571 switch (code)
1573 case PLUS:
1574 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1575 when x is NaN, infinite, or finite and nonzero. They aren't
1576 when x is -0 and the rounding mode is not towards -infinity,
1577 since (-0) + 0 is then 0. */
1578 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1579 return op0;
1581 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1582 transformations are safe even for IEEE. */
1583 if (GET_CODE (op0) == NEG)
1584 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1585 else if (GET_CODE (op1) == NEG)
1586 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1588 /* (~a) + 1 -> -a */
1589 if (INTEGRAL_MODE_P (mode)
1590 && GET_CODE (op0) == NOT
1591 && trueop1 == const1_rtx)
1592 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1594 /* Handle both-operands-constant cases. We can only add
1595 CONST_INTs to constants since the sum of relocatable symbols
1596 can't be handled by most assemblers. Don't add CONST_INT
1597 to CONST_INT since overflow won't be computed properly if wider
1598 than HOST_BITS_PER_WIDE_INT. */
1600 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1601 && GET_CODE (op1) == CONST_INT)
1602 return plus_constant (op0, INTVAL (op1));
1603 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1604 && GET_CODE (op0) == CONST_INT)
1605 return plus_constant (op1, INTVAL (op0));
1607 /* See if this is something like X * C - X or vice versa or
1608 if the multiplication is written as a shift. If so, we can
1609 distribute and make a new multiply, shift, or maybe just
1610 have X (if C is 2 in the example above). But don't make
1611 something more expensive than we had before. */
1613 if (SCALAR_INT_MODE_P (mode))
1615 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1616 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1617 rtx lhs = op0, rhs = op1;
1619 if (GET_CODE (lhs) == NEG)
1621 coeff0l = -1;
1622 coeff0h = -1;
1623 lhs = XEXP (lhs, 0);
1625 else if (GET_CODE (lhs) == MULT
1626 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1628 coeff0l = INTVAL (XEXP (lhs, 1));
1629 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1630 lhs = XEXP (lhs, 0);
1632 else if (GET_CODE (lhs) == ASHIFT
1633 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1634 && INTVAL (XEXP (lhs, 1)) >= 0
1635 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1637 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1638 coeff0h = 0;
1639 lhs = XEXP (lhs, 0);
1642 if (GET_CODE (rhs) == NEG)
1644 coeff1l = -1;
1645 coeff1h = -1;
1646 rhs = XEXP (rhs, 0);
1648 else if (GET_CODE (rhs) == MULT
1649 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1651 coeff1l = INTVAL (XEXP (rhs, 1));
1652 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1653 rhs = XEXP (rhs, 0);
1655 else if (GET_CODE (rhs) == ASHIFT
1656 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1657 && INTVAL (XEXP (rhs, 1)) >= 0
1658 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1660 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1661 coeff1h = 0;
1662 rhs = XEXP (rhs, 0);
1665 if (rtx_equal_p (lhs, rhs))
1667 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1668 rtx coeff;
1669 unsigned HOST_WIDE_INT l;
1670 HOST_WIDE_INT h;
1672 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1673 coeff = immed_double_const (l, h, mode);
1675 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1676 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1677 ? tem : 0;
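/* Worked example (illustrative): for (plus (mult x 3) x) the coefficients
   are 3 and 1, add_double sums them to 4, and the result is (mult x 4),
   kept only when rtx_cost says it is no more expensive than the original
   expression.  */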
1681 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1682 if ((GET_CODE (op1) == CONST_INT
1683 || GET_CODE (op1) == CONST_DOUBLE)
1684 && GET_CODE (op0) == XOR
1685 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1686 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1687 && mode_signbit_p (mode, op1))
1688 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1689 simplify_gen_binary (XOR, mode, op1,
1690 XEXP (op0, 1)));
1692 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1693 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1694 && GET_CODE (op0) == MULT
1695 && GET_CODE (XEXP (op0, 0)) == NEG)
1697 rtx in1, in2;
1699 in1 = XEXP (XEXP (op0, 0), 0);
1700 in2 = XEXP (op0, 1);
1701 return simplify_gen_binary (MINUS, mode, op1,
1702 simplify_gen_binary (MULT, mode,
1703 in1, in2));
1706 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1707 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1708 is 1. */
1709 if (COMPARISON_P (op0)
1710 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1711 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1712 && (reversed = reversed_comparison (op0, mode)))
1713 return
1714 simplify_gen_unary (NEG, mode, reversed, mode);
1716 /* If one of the operands is a PLUS or a MINUS, see if we can
1717 simplify this by the associative law.
1718 Don't use the associative law for floating point.
1719 The inaccuracy makes it nonassociative,
1720 and subtle programs can break if operations are associated. */
1722 if (INTEGRAL_MODE_P (mode)
1723 && (plus_minus_operand_p (op0)
1724 || plus_minus_operand_p (op1))
1725 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1726 return tem;
1728 /* Reassociate floating point addition only when the user
1729 specifies unsafe math optimizations. */
1730 if (FLOAT_MODE_P (mode)
1731 && flag_unsafe_math_optimizations)
1733 tem = simplify_associative_operation (code, mode, op0, op1);
1734 if (tem)
1735 return tem;
1737 break;
1739 case COMPARE:
1740 #ifdef HAVE_cc0
1741 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1742 using cc0, in which case we want to leave it as a COMPARE
1743 so we can distinguish it from a register-register-copy.
1745 In IEEE floating point, x-0 is not the same as x. */
1747 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1748 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1749 && trueop1 == CONST0_RTX (mode))
1750 return op0;
1751 #endif
1753 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1754 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1755 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1756 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1758 rtx xop00 = XEXP (op0, 0);
1759 rtx xop10 = XEXP (op1, 0);
1761 #ifdef HAVE_cc0
1762 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1763 #else
1764 if (REG_P (xop00) && REG_P (xop10)
1765 && GET_MODE (xop00) == GET_MODE (xop10)
1766 && REGNO (xop00) == REGNO (xop10)
1767 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1768 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1769 #endif
1770 return xop00;
1772 break;
1774 case MINUS:
1775 /* We can't assume x-x is 0 even with non-IEEE floating point,
1776 but since it is zero except in very strange circumstances, we
1777 will treat it as zero with -funsafe-math-optimizations and
1778 -ffinite-math-only. */
1779 if (rtx_equal_p (trueop0, trueop1)
1780 && ! side_effects_p (op0)
1781 && (! FLOAT_MODE_P (mode)
1782 || (flag_unsafe_math_optimizations
1783 && !HONOR_NANS (mode)
1784 && !HONOR_INFINITIES (mode))))
1785 return CONST0_RTX (mode);
1787 /* Change subtraction from zero into negation. (0 - x) is the
1788 same as -x when x is NaN, infinite, or finite and nonzero.
1789 But if the mode has signed zeros, and does not round towards
1790 -infinity, then 0 - 0 is 0, not -0. */
1791 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1792 return simplify_gen_unary (NEG, mode, op1, mode);
1794 /* (-1 - a) is ~a. */
1795 if (trueop0 == constm1_rtx)
1796 return simplify_gen_unary (NOT, mode, op1, mode);
1798 /* Subtracting 0 has no effect unless the mode has signed zeros
1799 and supports rounding towards -infinity. In such a case,
1800 0 - 0 is -0. */
1801 if (!(HONOR_SIGNED_ZEROS (mode)
1802 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1803 && trueop1 == CONST0_RTX (mode))
1804 return op0;
1806 /* See if this is something like X * C - X or vice versa or
1807 if the multiplication is written as a shift. If so, we can
1808 distribute and make a new multiply, shift, or maybe just
1809 have X (if C is 2 in the example above). But don't make
1810 something more expensive than we had before. */
1812 if (SCALAR_INT_MODE_P (mode))
1814 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1815 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1816 rtx lhs = op0, rhs = op1;
1818 if (GET_CODE (lhs) == NEG)
1820 coeff0l = -1;
1821 coeff0h = -1;
1822 lhs = XEXP (lhs, 0);
1824 else if (GET_CODE (lhs) == MULT
1825 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1827 coeff0l = INTVAL (XEXP (lhs, 1));
1828 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1829 lhs = XEXP (lhs, 0);
1831 else if (GET_CODE (lhs) == ASHIFT
1832 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1833 && INTVAL (XEXP (lhs, 1)) >= 0
1834 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1836 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1837 coeff0h = 0;
1838 lhs = XEXP (lhs, 0);
1841 if (GET_CODE (rhs) == NEG)
1843 negcoeff1l = 1;
1844 negcoeff1h = 0;
1845 rhs = XEXP (rhs, 0);
1847 else if (GET_CODE (rhs) == MULT
1848 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1850 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1851 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1852 rhs = XEXP (rhs, 0);
1854 else if (GET_CODE (rhs) == ASHIFT
1855 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1856 && INTVAL (XEXP (rhs, 1)) >= 0
1857 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1859 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1860 negcoeff1h = -1;
1861 rhs = XEXP (rhs, 0);
1864 if (rtx_equal_p (lhs, rhs))
1866 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1867 rtx coeff;
1868 unsigned HOST_WIDE_INT l;
1869 HOST_WIDE_INT h;
1871 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1872 coeff = immed_double_const (l, h, mode);
1874 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1875 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1876 ? tem : 0;
1880 /* (a - (-b)) -> (a + b). True even for IEEE. */
1881 if (GET_CODE (op1) == NEG)
1882 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1884 /* (-x - c) may be simplified as (-c - x). */
1885 if (GET_CODE (op0) == NEG
1886 && (GET_CODE (op1) == CONST_INT
1887 || GET_CODE (op1) == CONST_DOUBLE))
1889 tem = simplify_unary_operation (NEG, mode, op1, mode);
1890 if (tem)
1891 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1894 /* Don't let a relocatable value get a negative coeff. */
1895 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1896 return simplify_gen_binary (PLUS, mode,
1897 op0,
1898 neg_const_int (mode, op1));
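/* Example (illustrative): (minus (symbol_ref x) (const_int 4)) becomes
   (plus (symbol_ref x) (const_int -4)), so the relocatable operand keeps
   a positive coefficient in a canonical PLUS form.  */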
1900 /* (x - (x & y)) -> (x & ~y) */
1901 if (GET_CODE (op1) == AND)
1903 if (rtx_equal_p (op0, XEXP (op1, 0)))
1905 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1906 GET_MODE (XEXP (op1, 1)));
1907 return simplify_gen_binary (AND, mode, op0, tem);
1909 if (rtx_equal_p (op0, XEXP (op1, 1)))
1911 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1912 GET_MODE (XEXP (op1, 0)));
1913 return simplify_gen_binary (AND, mode, op0, tem);
1917 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1918 by reversing the comparison code if valid. */
1919 if (STORE_FLAG_VALUE == 1
1920 && trueop0 == const1_rtx
1921 && COMPARISON_P (op1)
1922 && (reversed = reversed_comparison (op1, mode)))
1923 return reversed;
1925 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1926 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1927 && GET_CODE (op1) == MULT
1928 && GET_CODE (XEXP (op1, 0)) == NEG)
1930 rtx in1, in2;
1932 in1 = XEXP (XEXP (op1, 0), 0);
1933 in2 = XEXP (op1, 1);
1934 return simplify_gen_binary (PLUS, mode,
1935 simplify_gen_binary (MULT, mode,
1936 in1, in2),
1937 op0);
1940 /* Canonicalize (minus (neg A) (mult B C)) to
1941 (minus (mult (neg B) C) A). */
1942 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1943 && GET_CODE (op1) == MULT
1944 && GET_CODE (op0) == NEG)
1946 rtx in1, in2;
1948 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1949 in2 = XEXP (op1, 1);
1950 return simplify_gen_binary (MINUS, mode,
1951 simplify_gen_binary (MULT, mode,
1952 in1, in2),
1953 XEXP (op0, 0));
1956 /* If one of the operands is a PLUS or a MINUS, see if we can
1957 simplify this by the associative law. This will, for example,
1958 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1959 Don't use the associative law for floating point.
1960 The inaccuracy makes it nonassociative,
1961 and subtle programs can break if operations are associated. */
1963 if (INTEGRAL_MODE_P (mode)
1964 && (plus_minus_operand_p (op0)
1965 || plus_minus_operand_p (op1))
1966 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1967 return tem;
1968 break;
1970 case MULT:
1971 if (trueop1 == constm1_rtx)
1972 return simplify_gen_unary (NEG, mode, op0, mode);
1974 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1975 x is NaN, since x * 0 is then also NaN. Nor is it valid
1976 when the mode has signed zeros, since multiplying a negative
1977 number by 0 will give -0, not 0. */
1978 if (!HONOR_NANS (mode)
1979 && !HONOR_SIGNED_ZEROS (mode)
1980 && trueop1 == CONST0_RTX (mode)
1981 && ! side_effects_p (op0))
1982 return op1;
1984 /* In IEEE floating point, x*1 is not equivalent to x for
1985 signalling NaNs. */
1986 if (!HONOR_SNANS (mode)
1987 && trueop1 == CONST1_RTX (mode))
1988 return op0;
1990 /* Convert multiply by constant power of two into shift unless
1991 we are still generating RTL. This test is a kludge. */
1992 if (GET_CODE (trueop1) == CONST_INT
1993 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1994 /* If the mode is larger than the host word size, and the
1995 uppermost bit is set, then this isn't a power of two due
1996 to implicit sign extension. */
1997 && (width <= HOST_BITS_PER_WIDE_INT
1998 || val != HOST_BITS_PER_WIDE_INT - 1))
1999 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
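      /* For instance, with trueop1 == 8, exact_log2 returns 3 and the
	 multiply becomes a shift; on ordinary C unsigned values:

	     unsigned x = 25;
	     assert (x * 8 == x << 3);

	 (Illustrative only; the width check above excludes the one bit
	 pattern that is really a sign-extended negative rather than a
	 power of two.)  */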
2001 /* Likewise for multipliers wider than a word. */
2002 if (GET_CODE (trueop1) == CONST_DOUBLE
2003 && (GET_MODE (trueop1) == VOIDmode
2004 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2005 && GET_MODE (op0) == mode
2006 && CONST_DOUBLE_LOW (trueop1) == 0
2007 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2008 return simplify_gen_binary (ASHIFT, mode, op0,
2009 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2011 /* x*2 is x+x and x*(-1) is -x */
2012 if (GET_CODE (trueop1) == CONST_DOUBLE
2013 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2014 && GET_MODE (op0) == mode)
2016 REAL_VALUE_TYPE d;
2017 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2019 if (REAL_VALUES_EQUAL (d, dconst2))
2020 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2022 if (!HONOR_SNANS (mode)
2023 && REAL_VALUES_EQUAL (d, dconstm1))
2024 return simplify_gen_unary (NEG, mode, op0, mode);
2027 /* Optimize -x * -x as x * x. */
2028 if (FLOAT_MODE_P (mode)
2029 && GET_CODE (op0) == NEG
2030 && GET_CODE (op1) == NEG
2031 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2032 && !side_effects_p (XEXP (op0, 0)))
2033 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2035 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2036 if (SCALAR_FLOAT_MODE_P (mode)
2037 && GET_CODE (op0) == ABS
2038 && GET_CODE (op1) == ABS
2039 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2040 && !side_effects_p (XEXP (op0, 0)))
2041 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2043 /* Reassociate multiplication, but for floating point MULTs
2044 only when the user specifies unsafe math optimizations. */
2045 if (! FLOAT_MODE_P (mode)
2046 || flag_unsafe_math_optimizations)
2048 tem = simplify_associative_operation (code, mode, op0, op1);
2049 if (tem)
2050 return tem;
2052 break;
2054 case IOR:
2055 if (trueop1 == const0_rtx)
2056 return op0;
2057 if (GET_CODE (trueop1) == CONST_INT
2058 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2059 == GET_MODE_MASK (mode)))
2060 return op1;
2061 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2062 return op0;
2063 /* A | (~A) -> -1 */
2064 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2065 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2066 && ! side_effects_p (op0)
2067 && SCALAR_INT_MODE_P (mode))
2068 return constm1_rtx;
2070 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2071 if (GET_CODE (op1) == CONST_INT
2072 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2073 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2074 return op1;
2076 /* Canonicalize (X & C1) | C2. */
2077 if (GET_CODE (op0) == AND
2078 && GET_CODE (trueop1) == CONST_INT
2079 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2081 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2082 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2083 HOST_WIDE_INT c2 = INTVAL (trueop1);
2085 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2086 if ((c1 & c2) == c1
2087 && !side_effects_p (XEXP (op0, 0)))
2088 return trueop1;
2090 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2091 if (((c1|c2) & mask) == mask)
2092 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2094 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2095 if (((c1 & ~c2) & mask) != (c1 & mask))
2097 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2098 gen_int_mode (c1 & ~c2, mode));
2099 return simplify_gen_binary (IOR, mode, tem, op1);
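	  /* A numeric sketch of the last rule (not from the original
	     source): with C1 == 0x0f and C2 == 0x06, any bit that C2
	     forces to 1 need not survive the AND, so C1 shrinks to
	     C1 & ~C2 == 0x09:

		 unsigned x = 0xab;
		 assert (((x & 0x0f) | 0x06) == ((x & 0x09) | 0x06));  */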
2103 /* Convert (A & B) | A to A. */
2104 if (GET_CODE (op0) == AND
2105 && (rtx_equal_p (XEXP (op0, 0), op1)
2106 || rtx_equal_p (XEXP (op0, 1), op1))
2107 && ! side_effects_p (XEXP (op0, 0))
2108 && ! side_effects_p (XEXP (op0, 1)))
2109 return op1;
2111 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2112 mode size to (rotate A CX). */
2114 if (GET_CODE (op1) == ASHIFT
2115 || GET_CODE (op1) == SUBREG)
2117 opleft = op1;
2118 opright = op0;
2120 else
2122 opright = op1;
2123 opleft = op0;
2126 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2127 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2128 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2129 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2130 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2131 == GET_MODE_BITSIZE (mode)))
2132 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
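      /* A minimal sketch of the pattern just matched (assuming a 32-bit
	 mode): when the two shift counts sum to the bitsize, the shifted
	 halves reassemble every bit exactly once, which is a rotate:

	     uint32_t x = 0x12345678;
	     assert (((x << 8) | (x >> 24)) == 0x34567812);  */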
2134 /* Same, but for ashift that has been "simplified" to a wider mode
2135 by simplify_shift_const. */
2137 if (GET_CODE (opleft) == SUBREG
2138 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2139 && GET_CODE (opright) == LSHIFTRT
2140 && GET_CODE (XEXP (opright, 0)) == SUBREG
2141 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2142 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2143 && (GET_MODE_SIZE (GET_MODE (opleft))
2144 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2145 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2146 SUBREG_REG (XEXP (opright, 0)))
2147 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2148 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2149 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2150 == GET_MODE_BITSIZE (mode)))
2151 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2152 XEXP (SUBREG_REG (opleft), 1));
2154 /* If we have (ior (and X C1) C2), simplify this by making
2155 C1 as small as possible if C1 actually changes. */
2156 if (GET_CODE (op1) == CONST_INT
2157 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2158 || INTVAL (op1) > 0)
2159 && GET_CODE (op0) == AND
2160 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2161 && GET_CODE (op1) == CONST_INT
2162 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2163 return simplify_gen_binary (IOR, mode,
2164 simplify_gen_binary
2165 (AND, mode, XEXP (op0, 0),
2166 GEN_INT (INTVAL (XEXP (op0, 1))
2167 & ~INTVAL (op1))),
2168 op1);
2170 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2171 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2172 the PLUS does not affect any of the bits in OP1: then we can do
2173 the IOR as a PLUS and we can associate. This is valid if OP1
2174 can be safely shifted left C bits. */
2175 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2176 && GET_CODE (XEXP (op0, 0)) == PLUS
2177 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2178 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2179 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2181 int count = INTVAL (XEXP (op0, 1));
2182 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2184 if (mask >> count == INTVAL (trueop1)
2185 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2186 return simplify_gen_binary (ASHIFTRT, mode,
2187 plus_constant (XEXP (op0, 0), mask),
2188 XEXP (op0, 1));
2191 tem = simplify_associative_operation (code, mode, op0, op1);
2192 if (tem)
2193 return tem;
2194 break;
2196 case XOR:
2197 if (trueop1 == const0_rtx)
2198 return op0;
2199 if (GET_CODE (trueop1) == CONST_INT
2200 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2201 == GET_MODE_MASK (mode)))
2202 return simplify_gen_unary (NOT, mode, op0, mode);
2203 if (rtx_equal_p (trueop0, trueop1)
2204 && ! side_effects_p (op0)
2205 && GET_MODE_CLASS (mode) != MODE_CC)
2206 return CONST0_RTX (mode);
2208 /* Canonicalize XOR of the most significant bit to PLUS. */
2209 if ((GET_CODE (op1) == CONST_INT
2210 || GET_CODE (op1) == CONST_DOUBLE)
2211 && mode_signbit_p (mode, op1))
2212 return simplify_gen_binary (PLUS, mode, op0, op1);
2213 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2214 if ((GET_CODE (op1) == CONST_INT
2215 || GET_CODE (op1) == CONST_DOUBLE)
2216 && GET_CODE (op0) == PLUS
2217 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2218 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2219 && mode_signbit_p (mode, XEXP (op0, 1)))
2220 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2221 simplify_gen_binary (XOR, mode, op1,
2222 XEXP (op0, 1)));
2224 /* If we are XORing two things that have no bits in common,
2225 convert them into an IOR. This helps to detect rotation encoded
2226 using those methods and possibly other simplifications. */
2228 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2229 && (nonzero_bits (op0, mode)
2230 & nonzero_bits (op1, mode)) == 0)
2231 return (simplify_gen_binary (IOR, mode, op0, op1));
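      /* The underlying identity, as a sketch: when a & b == 0 each bit
	 position holds at most one 1, so XOR and IOR agree:

	     unsigned a = 0xf0, b = 0x0c;
	     assert ((a & b) == 0 && (a ^ b) == (a | b));  */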
2233 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2234 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2235 (NOT y). */
2237 int num_negated = 0;
2239 if (GET_CODE (op0) == NOT)
2240 num_negated++, op0 = XEXP (op0, 0);
2241 if (GET_CODE (op1) == NOT)
2242 num_negated++, op1 = XEXP (op1, 0);
2244 if (num_negated == 2)
2245 return simplify_gen_binary (XOR, mode, op0, op1);
2246 else if (num_negated == 1)
2247 return simplify_gen_unary (NOT, mode,
2248 simplify_gen_binary (XOR, mode, op0, op1),
2249 mode);
2252 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2253 correspond to a machine insn or result in further simplifications
2254 if B is a constant. */
2256 if (GET_CODE (op0) == AND
2257 && rtx_equal_p (XEXP (op0, 1), op1)
2258 && ! side_effects_p (op1))
2259 return simplify_gen_binary (AND, mode,
2260 simplify_gen_unary (NOT, mode,
2261 XEXP (op0, 0), mode),
2262 op1);
2264 else if (GET_CODE (op0) == AND
2265 && rtx_equal_p (XEXP (op0, 0), op1)
2266 && ! side_effects_p (op1))
2267 return simplify_gen_binary (AND, mode,
2268 simplify_gen_unary (NOT, mode,
2269 XEXP (op0, 1), mode),
2270 op1);
2272 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2273 comparison if STORE_FLAG_VALUE is 1. */
2274 if (STORE_FLAG_VALUE == 1
2275 && trueop1 == const1_rtx
2276 && COMPARISON_P (op0)
2277 && (reversed = reversed_comparison (op0, mode)))
2278 return reversed;
2280 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2281 is (lt foo (const_int 0)), so we can perform the above
2282 simplification if STORE_FLAG_VALUE is 1. */
2284 if (STORE_FLAG_VALUE == 1
2285 && trueop1 == const1_rtx
2286 && GET_CODE (op0) == LSHIFTRT
2287 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2288 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2289 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
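      /* Sketch, assuming a 32-bit mode and STORE_FLAG_VALUE == 1: the
	 logical shift moves the sign bit into bit 0, so the XOR with 1
	 computes the "non-negative" test returned above:

	     int32_t x = -5;
	     assert ((((uint32_t) x >> 31) ^ 1) == (x >= 0));  */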
2291 /* (xor (comparison foo bar) (const_int sign-bit))
2292 when STORE_FLAG_VALUE is the sign bit. */
2293 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2294 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2295 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2296 && trueop1 == const_true_rtx
2297 && COMPARISON_P (op0)
2298 && (reversed = reversed_comparison (op0, mode)))
2299 return reversed;
2301 break;
2303 tem = simplify_associative_operation (code, mode, op0, op1);
2304 if (tem)
2305 return tem;
2306 break;
2308 case AND:
2309 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2310 return trueop1;
2311 /* If we are turning off bits already known off in OP0, we need
2312 not do an AND. */
2313 if (GET_CODE (trueop1) == CONST_INT
2314 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2315 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2316 return op0;
2317 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2318 && GET_MODE_CLASS (mode) != MODE_CC)
2319 return op0;
2320 /* A & (~A) -> 0 */
2321 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2322 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2323 && ! side_effects_p (op0)
2324 && GET_MODE_CLASS (mode) != MODE_CC)
2325 return CONST0_RTX (mode);
2327 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2328 there are no nonzero bits of C outside of X's mode. */
2329 if ((GET_CODE (op0) == SIGN_EXTEND
2330 || GET_CODE (op0) == ZERO_EXTEND)
2331 && GET_CODE (trueop1) == CONST_INT
2332 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2333 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2334 & INTVAL (trueop1)) == 0)
2336 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2337 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2338 gen_int_mode (INTVAL (trueop1),
2339 imode));
2340 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
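      /* E.g. with X in QImode and C == 0x7f, C has no bits outside the
	 byte, so (and (sign_extend X) 0x7f) equals
	 (zero_extend (and X 0x7f)): the AND discards exactly the bits
	 the extension would have replicated.  */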
2343 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2344 if (GET_CODE (op0) == IOR
2345 && GET_CODE (trueop1) == CONST_INT
2346 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2348 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2349 return simplify_gen_binary (IOR, mode,
2350 simplify_gen_binary (AND, mode,
2351 XEXP (op0, 0), op1),
2352 gen_int_mode (tmp, mode));
2355 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2356 insn (and may simplify more). */
2357 if (GET_CODE (op0) == XOR
2358 && rtx_equal_p (XEXP (op0, 0), op1)
2359 && ! side_effects_p (op1))
2360 return simplify_gen_binary (AND, mode,
2361 simplify_gen_unary (NOT, mode,
2362 XEXP (op0, 1), mode),
2363 op1);
2365 if (GET_CODE (op0) == XOR
2366 && rtx_equal_p (XEXP (op0, 1), op1)
2367 && ! side_effects_p (op1))
2368 return simplify_gen_binary (AND, mode,
2369 simplify_gen_unary (NOT, mode,
2370 XEXP (op0, 0), mode),
2371 op1);
2373 /* Similarly for (~(A ^ B)) & A. */
2374 if (GET_CODE (op0) == NOT
2375 && GET_CODE (XEXP (op0, 0)) == XOR
2376 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2377 && ! side_effects_p (op1))
2378 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2380 if (GET_CODE (op0) == NOT
2381 && GET_CODE (XEXP (op0, 0)) == XOR
2382 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2383 && ! side_effects_p (op1))
2384 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2386 /* Convert (A | B) & A to A. */
2387 if (GET_CODE (op0) == IOR
2388 && (rtx_equal_p (XEXP (op0, 0), op1)
2389 || rtx_equal_p (XEXP (op0, 1), op1))
2390 && ! side_effects_p (XEXP (op0, 0))
2391 && ! side_effects_p (XEXP (op0, 1)))
2392 return op1;
2394 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2395 ((A & N) + B) & M -> (A + B) & M
2396 Similarly if (N & M) == 0,
2397 ((A | N) + B) & M -> (A + B) & M
2398 and for - instead of + and/or ^ instead of |. */
2399 if (GET_CODE (trueop1) == CONST_INT
2400 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2401 && ~INTVAL (trueop1)
2402 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2403 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2405 rtx pmop[2];
2406 int which;
2408 pmop[0] = XEXP (op0, 0);
2409 pmop[1] = XEXP (op0, 1);
2411 for (which = 0; which < 2; which++)
2413 tem = pmop[which];
2414 switch (GET_CODE (tem))
2416 case AND:
2417 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2418 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2419 == INTVAL (trueop1))
2420 pmop[which] = XEXP (tem, 0);
2421 break;
2422 case IOR:
2423 case XOR:
2424 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2425 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2426 pmop[which] = XEXP (tem, 0);
2427 break;
2428 default:
2429 break;
2433 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2435 tem = simplify_gen_binary (GET_CODE (op0), mode,
2436 pmop[0], pmop[1]);
2437 return simplify_gen_binary (code, mode, tem, op1);
2440 tem = simplify_associative_operation (code, mode, op0, op1);
2441 if (tem)
2442 return tem;
2443 break;
2445 case UDIV:
2446 /* 0/x is 0 (or x&0 if x has side-effects). */
2447 if (trueop0 == CONST0_RTX (mode))
2449 if (side_effects_p (op1))
2450 return simplify_gen_binary (AND, mode, op1, trueop0);
2451 return trueop0;
2453 /* x/1 is x. */
2454 if (trueop1 == CONST1_RTX (mode))
2455 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2456 /* Convert divide by power of two into shift. */
2457 if (GET_CODE (trueop1) == CONST_INT
2458 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2459 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
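      /* For example an unsigned divide by 16 becomes a logical right
	 shift by 4.  This is only valid for UDIV; signed division
	 truncates toward zero and would need a rounding correction:

	     unsigned x = 100;
	     assert (x / 16 == x >> 4);  */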
2460 break;
2462 case DIV:
2463 /* Handle floating point and integers separately. */
2464 if (SCALAR_FLOAT_MODE_P (mode))
2466 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2467 safe for modes with NaNs, since 0.0 / 0.0 will then be
2468 NaN rather than 0.0. Nor is it safe for modes with signed
2469 zeros, since dividing 0 by a negative number gives -0.0. */
2470 if (trueop0 == CONST0_RTX (mode)
2471 && !HONOR_NANS (mode)
2472 && !HONOR_SIGNED_ZEROS (mode)
2473 && ! side_effects_p (op1))
2474 return op0;
2475 /* x/1.0 is x. */
2476 if (trueop1 == CONST1_RTX (mode)
2477 && !HONOR_SNANS (mode))
2478 return op0;
2480 if (GET_CODE (trueop1) == CONST_DOUBLE
2481 && trueop1 != CONST0_RTX (mode))
2483 REAL_VALUE_TYPE d;
2484 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2486 /* x/-1.0 is -x. */
2487 if (REAL_VALUES_EQUAL (d, dconstm1)
2488 && !HONOR_SNANS (mode))
2489 return simplify_gen_unary (NEG, mode, op0, mode);
2491 /* Change FP division by a constant into multiplication.
2492 Only do this with -funsafe-math-optimizations. */
2493 if (flag_unsafe_math_optimizations
2494 && !REAL_VALUES_EQUAL (d, dconst0))
2496 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2497 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2498 return simplify_gen_binary (MULT, mode, op0, tem);
2502 else
2504 /* 0/x is 0 (or x&0 if x has side-effects). */
2505 if (trueop0 == CONST0_RTX (mode))
2507 if (side_effects_p (op1))
2508 return simplify_gen_binary (AND, mode, op1, trueop0);
2509 return trueop0;
2511 /* x/1 is x. */
2512 if (trueop1 == CONST1_RTX (mode))
2513 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2514 /* x/-1 is -x. */
2515 if (trueop1 == constm1_rtx)
2517 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2518 return simplify_gen_unary (NEG, mode, x, mode);
2521 break;
2523 case UMOD:
2524 /* 0%x is 0 (or x&0 if x has side-effects). */
2525 if (trueop0 == CONST0_RTX (mode))
2527 if (side_effects_p (op1))
2528 return simplify_gen_binary (AND, mode, op1, trueop0);
2529 return trueop0;
2531 /* x%1 is 0 (or x&0 if x has side-effects). */
2532 if (trueop1 == CONST1_RTX (mode))
2534 if (side_effects_p (op0))
2535 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2536 return CONST0_RTX (mode);
2538 /* Implement modulus by power of two as AND. */
2539 if (GET_CODE (trueop1) == CONST_INT
2540 && exact_log2 (INTVAL (trueop1)) > 0)
2541 return simplify_gen_binary (AND, mode, op0,
2542 GEN_INT (INTVAL (op1) - 1));
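      /* Illustrative sketch: for a power-of-two modulus the remainder
	 is just the low-order bits, which the AND above extracts:

	     unsigned x = 100;
	     assert (x % 8 == (x & 7));  */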
2543 break;
2545 case MOD:
2546 /* 0%x is 0 (or x&0 if x has side-effects). */
2547 if (trueop0 == CONST0_RTX (mode))
2549 if (side_effects_p (op1))
2550 return simplify_gen_binary (AND, mode, op1, trueop0);
2551 return trueop0;
2553 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2554 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2556 if (side_effects_p (op0))
2557 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2558 return CONST0_RTX (mode);
2560 break;
2562 case ROTATERT:
2563 case ROTATE:
2564 case ASHIFTRT:
2565 if (trueop1 == CONST0_RTX (mode))
2566 return op0;
2567 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2568 return op0;
2569 /* Rotating ~0 always results in ~0. */
2570 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2571 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2572 && ! side_effects_p (op1))
2573 return op0;
2574 break;
2576 case ASHIFT:
2577 case SS_ASHIFT:
2578 if (trueop1 == CONST0_RTX (mode))
2579 return op0;
2580 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2581 return op0;
2582 break;
2584 case LSHIFTRT:
2585 if (trueop1 == CONST0_RTX (mode))
2586 return op0;
2587 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2588 return op0;
2589 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2590 if (GET_CODE (op0) == CLZ
2591 && GET_CODE (trueop1) == CONST_INT
2592 && STORE_FLAG_VALUE == 1
2593 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2595 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2596 unsigned HOST_WIDE_INT zero_val = 0;
2598 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2599 && zero_val == GET_MODE_BITSIZE (imode)
2600 && INTVAL (trueop1) == exact_log2 (zero_val))
2601 return simplify_gen_relational (EQ, mode, imode,
2602 XEXP (op0, 0), const0_rtx);
2604 break;
2606 case SMIN:
2607 if (width <= HOST_BITS_PER_WIDE_INT
2608 && GET_CODE (trueop1) == CONST_INT
2609 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2610 && ! side_effects_p (op0))
2611 return op1;
2612 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2613 return op0;
2614 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617 break;
2619 case SMAX:
2620 if (width <= HOST_BITS_PER_WIDE_INT
2621 && GET_CODE (trueop1) == CONST_INT
2622 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2623 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2624 && ! side_effects_p (op0))
2625 return op1;
2626 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2627 return op0;
2628 tem = simplify_associative_operation (code, mode, op0, op1);
2629 if (tem)
2630 return tem;
2631 break;
2633 case UMIN:
2634 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2635 return op1;
2636 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2637 return op0;
2638 tem = simplify_associative_operation (code, mode, op0, op1);
2639 if (tem)
2640 return tem;
2641 break;
2643 case UMAX:
2644 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2645 return op1;
2646 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2647 return op0;
2648 tem = simplify_associative_operation (code, mode, op0, op1);
2649 if (tem)
2650 return tem;
2651 break;
2653 case SS_PLUS:
2654 case US_PLUS:
2655 case SS_MINUS:
2656 case US_MINUS:
2657 /* ??? There are simplifications that can be done. */
2658 return 0;
2660 case VEC_SELECT:
2661 if (!VECTOR_MODE_P (mode))
2663 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2664 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2665 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2666 gcc_assert (XVECLEN (trueop1, 0) == 1);
2667 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2669 if (GET_CODE (trueop0) == CONST_VECTOR)
2670 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2671 (trueop1, 0, 0)));
2673 else
2675 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2676 gcc_assert (GET_MODE_INNER (mode)
2677 == GET_MODE_INNER (GET_MODE (trueop0)));
2678 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2680 if (GET_CODE (trueop0) == CONST_VECTOR)
2682 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2683 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2684 rtvec v = rtvec_alloc (n_elts);
2685 unsigned int i;
2687 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2688 for (i = 0; i < n_elts; i++)
2690 rtx x = XVECEXP (trueop1, 0, i);
2692 gcc_assert (GET_CODE (x) == CONST_INT);
2693 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2694 INTVAL (x));
2697 return gen_rtx_CONST_VECTOR (mode, v);
2701 if (XVECLEN (trueop1, 0) == 1
2702 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2703 && GET_CODE (trueop0) == VEC_CONCAT)
2705 rtx vec = trueop0;
2706 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2708 /* Try to find the element in the VEC_CONCAT. */
2709 while (GET_MODE (vec) != mode
2710 && GET_CODE (vec) == VEC_CONCAT)
2712 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2713 if (offset < vec_size)
2714 vec = XEXP (vec, 0);
2715 else
2717 offset -= vec_size;
2718 vec = XEXP (vec, 1);
2720 vec = avoid_constant_pool_reference (vec);
2723 if (GET_MODE (vec) == mode)
2724 return vec;
2727 return 0;
2728 case VEC_CONCAT:
2730 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2731 ? GET_MODE (trueop0)
2732 : GET_MODE_INNER (mode));
2733 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2734 ? GET_MODE (trueop1)
2735 : GET_MODE_INNER (mode));
2737 gcc_assert (VECTOR_MODE_P (mode));
2738 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2739 == GET_MODE_SIZE (mode));
2741 if (VECTOR_MODE_P (op0_mode))
2742 gcc_assert (GET_MODE_INNER (mode)
2743 == GET_MODE_INNER (op0_mode));
2744 else
2745 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2747 if (VECTOR_MODE_P (op1_mode))
2748 gcc_assert (GET_MODE_INNER (mode)
2749 == GET_MODE_INNER (op1_mode));
2750 else
2751 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2753 if ((GET_CODE (trueop0) == CONST_VECTOR
2754 || GET_CODE (trueop0) == CONST_INT
2755 || GET_CODE (trueop0) == CONST_DOUBLE)
2756 && (GET_CODE (trueop1) == CONST_VECTOR
2757 || GET_CODE (trueop1) == CONST_INT
2758 || GET_CODE (trueop1) == CONST_DOUBLE))
2760 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2761 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2762 rtvec v = rtvec_alloc (n_elts);
2763 unsigned int i;
2764 unsigned in_n_elts = 1;
2766 if (VECTOR_MODE_P (op0_mode))
2767 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2768 for (i = 0; i < n_elts; i++)
2770 if (i < in_n_elts)
2772 if (!VECTOR_MODE_P (op0_mode))
2773 RTVEC_ELT (v, i) = trueop0;
2774 else
2775 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2777 else
2779 if (!VECTOR_MODE_P (op1_mode))
2780 RTVEC_ELT (v, i) = trueop1;
2781 else
2782 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2783 i - in_n_elts);
2787 return gen_rtx_CONST_VECTOR (mode, v);
2790 return 0;
2792 default:
2793 gcc_unreachable ();
2796 return 0;
2800 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2801 rtx op0, rtx op1)
2803 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2804 HOST_WIDE_INT val;
2805 unsigned int width = GET_MODE_BITSIZE (mode);
2807 if (VECTOR_MODE_P (mode)
2808 && code != VEC_CONCAT
2809 && GET_CODE (op0) == CONST_VECTOR
2810 && GET_CODE (op1) == CONST_VECTOR)
2812 unsigned n_elts = GET_MODE_NUNITS (mode);
2813 enum machine_mode op0mode = GET_MODE (op0);
2814 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2815 enum machine_mode op1mode = GET_MODE (op1);
2816 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2817 rtvec v = rtvec_alloc (n_elts);
2818 unsigned int i;
2820 gcc_assert (op0_n_elts == n_elts);
2821 gcc_assert (op1_n_elts == n_elts);
2822 for (i = 0; i < n_elts; i++)
2824 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2825 CONST_VECTOR_ELT (op0, i),
2826 CONST_VECTOR_ELT (op1, i));
2827 if (!x)
2828 return 0;
2829 RTVEC_ELT (v, i) = x;
2832 return gen_rtx_CONST_VECTOR (mode, v);
2835 if (VECTOR_MODE_P (mode)
2836 && code == VEC_CONCAT
2837 && CONSTANT_P (op0) && CONSTANT_P (op1))
2839 unsigned n_elts = GET_MODE_NUNITS (mode);
2840 rtvec v = rtvec_alloc (n_elts);
2842 gcc_assert (n_elts >= 2);
2843 if (n_elts == 2)
2845 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2846 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2848 RTVEC_ELT (v, 0) = op0;
2849 RTVEC_ELT (v, 1) = op1;
2851 else
2853 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2854 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2855 unsigned i;
2857 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2858 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2859 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2861 for (i = 0; i < op0_n_elts; ++i)
2862 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2863 for (i = 0; i < op1_n_elts; ++i)
2864 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2867 return gen_rtx_CONST_VECTOR (mode, v);
2870 if (SCALAR_FLOAT_MODE_P (mode)
2871 && GET_CODE (op0) == CONST_DOUBLE
2872 && GET_CODE (op1) == CONST_DOUBLE
2873 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2875 if (code == AND
2876 || code == IOR
2877 || code == XOR)
2879 long tmp0[4];
2880 long tmp1[4];
2881 REAL_VALUE_TYPE r;
2882 int i;
2884 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2885 GET_MODE (op0));
2886 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2887 GET_MODE (op1));
2888 for (i = 0; i < 4; i++)
2890 switch (code)
2892 case AND:
2893 tmp0[i] &= tmp1[i];
2894 break;
2895 case IOR:
2896 tmp0[i] |= tmp1[i];
2897 break;
2898 case XOR:
2899 tmp0[i] ^= tmp1[i];
2900 break;
2901 default:
2902 gcc_unreachable ();
2905 real_from_target (&r, tmp0, mode);
2906 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2908 else
2910 REAL_VALUE_TYPE f0, f1, value, result;
2911 bool inexact;
2913 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2914 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2915 real_convert (&f0, mode, &f0);
2916 real_convert (&f1, mode, &f1);
2918 if (HONOR_SNANS (mode)
2919 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2920 return 0;
2922 if (code == DIV
2923 && REAL_VALUES_EQUAL (f1, dconst0)
2924 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2925 return 0;
2927 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2928 && flag_trapping_math
2929 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2931 int s0 = REAL_VALUE_NEGATIVE (f0);
2932 int s1 = REAL_VALUE_NEGATIVE (f1);
2934 switch (code)
2936 case PLUS:
2937 /* Inf + -Inf = NaN plus exception. */
2938 if (s0 != s1)
2939 return 0;
2940 break;
2941 case MINUS:
2942 /* Inf - Inf = NaN plus exception. */
2943 if (s0 == s1)
2944 return 0;
2945 break;
2946 case DIV:
2947 /* Inf / Inf = NaN plus exception. */
2948 return 0;
2949 default:
2950 break;
2954 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2955 && flag_trapping_math
2956 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2957 || (REAL_VALUE_ISINF (f1)
2958 && REAL_VALUES_EQUAL (f0, dconst0))))
2959 /* Inf * 0 = NaN plus exception. */
2960 return 0;
2962 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2963 &f0, &f1);
2964 real_convert (&result, mode, &value);
2966 /* Don't constant fold this floating point operation if
2967 the result has overflowed and flag_trapping_math is set. */
2969 if (flag_trapping_math
2970 && MODE_HAS_INFINITIES (mode)
2971 && REAL_VALUE_ISINF (result)
2972 && !REAL_VALUE_ISINF (f0)
2973 && !REAL_VALUE_ISINF (f1))
2974 /* Overflow plus exception. */
2975 return 0;
2977 /* Don't constant fold this floating point operation if the
2978 result may depend upon the run-time rounding mode and
2979 flag_rounding_math is set, or if GCC's software emulation
2980 is unable to accurately represent the result. */
2982 if ((flag_rounding_math
2983 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2984 && !flag_unsafe_math_optimizations))
2985 && (inexact || !real_identical (&result, &value)))
2986 return NULL_RTX;
2988 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2992 /* We can fold some multi-word operations. */
2993 if (GET_MODE_CLASS (mode) == MODE_INT
2994 && width == HOST_BITS_PER_WIDE_INT * 2
2995 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2996 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2998 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2999 HOST_WIDE_INT h1, h2, hv, ht;
3001 if (GET_CODE (op0) == CONST_DOUBLE)
3002 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3003 else
3004 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3006 if (GET_CODE (op1) == CONST_DOUBLE)
3007 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3008 else
3009 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3011 switch (code)
3013 case MINUS:
3014 /* A - B == A + (-B). */
3015 neg_double (l2, h2, &lv, &hv);
3016 l2 = lv, h2 = hv;
3018 /* Fall through.... */
3020 case PLUS:
3021 add_double (l1, h1, l2, h2, &lv, &hv);
3022 break;
3024 case MULT:
3025 mul_double (l1, h1, l2, h2, &lv, &hv);
3026 break;
3028 case DIV:
3029 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3030 &lv, &hv, &lt, &ht))
3031 return 0;
3032 break;
3034 case MOD:
3035 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3036 &lt, &ht, &lv, &hv))
3037 return 0;
3038 break;
3040 case UDIV:
3041 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3042 &lv, &hv, &lt, &ht))
3043 return 0;
3044 break;
3046 case UMOD:
3047 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3048 &lt, &ht, &lv, &hv))
3049 return 0;
3050 break;
3052 case AND:
3053 lv = l1 & l2, hv = h1 & h2;
3054 break;
3056 case IOR:
3057 lv = l1 | l2, hv = h1 | h2;
3058 break;
3060 case XOR:
3061 lv = l1 ^ l2, hv = h1 ^ h2;
3062 break;
3064 case SMIN:
3065 if (h1 < h2
3066 || (h1 == h2
3067 && ((unsigned HOST_WIDE_INT) l1
3068 < (unsigned HOST_WIDE_INT) l2)))
3069 lv = l1, hv = h1;
3070 else
3071 lv = l2, hv = h2;
3072 break;
3074 case SMAX:
3075 if (h1 > h2
3076 || (h1 == h2
3077 && ((unsigned HOST_WIDE_INT) l1
3078 > (unsigned HOST_WIDE_INT) l2)))
3079 lv = l1, hv = h1;
3080 else
3081 lv = l2, hv = h2;
3082 break;
3084 case UMIN:
3085 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3086 || (h1 == h2
3087 && ((unsigned HOST_WIDE_INT) l1
3088 < (unsigned HOST_WIDE_INT) l2)))
3089 lv = l1, hv = h1;
3090 else
3091 lv = l2, hv = h2;
3092 break;
3094 case UMAX:
3095 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3096 || (h1 == h2
3097 && ((unsigned HOST_WIDE_INT) l1
3098 > (unsigned HOST_WIDE_INT) l2)))
3099 lv = l1, hv = h1;
3100 else
3101 lv = l2, hv = h2;
3102 break;
3104 case LSHIFTRT: case ASHIFTRT:
3105 case ASHIFT:
3106 case ROTATE: case ROTATERT:
3107 if (SHIFT_COUNT_TRUNCATED)
3108 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3110 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3111 return 0;
3113 if (code == LSHIFTRT || code == ASHIFTRT)
3114 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3115 code == ASHIFTRT);
3116 else if (code == ASHIFT)
3117 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3118 else if (code == ROTATE)
3119 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3120 else /* code == ROTATERT */
3121 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3122 break;
3124 default:
3125 return 0;
3128 return immed_double_const (lv, hv, mode);
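  /* A minimal sketch of the (low, high) pair arithmetic used above
     (a hypothetical 2x32-bit variant for illustration): addition must
     propagate a carry from the low word into the high word, which is
     what add_double does for HOST_WIDE_INT halves:

	 uint32_t l1 = 0xffffffff, l2 = 1;
	 int32_t h1 = 0, h2 = 0;
	 uint32_t lv = l1 + l2;		     -- wraps to 0
	 int32_t hv = h1 + h2 + (lv < l1);   -- carry in, so hv == 1  */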
3131 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3132 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3134 /* Get the integer argument values in two forms:
3135 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3137 arg0 = INTVAL (op0);
3138 arg1 = INTVAL (op1);
3140 if (width < HOST_BITS_PER_WIDE_INT)
3142 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3143 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3145 arg0s = arg0;
3146 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3147 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3149 arg1s = arg1;
3150 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3151 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3153 else
3155 arg0s = arg0;
3156 arg1s = arg1;
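      /* Worked example of the two forms (assuming width == 8): the byte
	 0xff is 255 in the zero-extended form ARG0 but -1 in the
	 sign-extended form ARG0S, because its top bit (1 << 7) is set
	 and the high host bits get filled with ones.  The signed cases
	 below use ARG0S/ARG1S, the unsigned ones ARG0/ARG1.  */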
3159 /* Compute the value of the arithmetic. */
3161 switch (code)
3163 case PLUS:
3164 val = arg0s + arg1s;
3165 break;
3167 case MINUS:
3168 val = arg0s - arg1s;
3169 break;
3171 case MULT:
3172 val = arg0s * arg1s;
3173 break;
3175 case DIV:
3176 if (arg1s == 0
3177 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3178 && arg1s == -1))
3179 return 0;
3180 val = arg0s / arg1s;
3181 break;
3183 case MOD:
3184 if (arg1s == 0
3185 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3186 && arg1s == -1))
3187 return 0;
3188 val = arg0s % arg1s;
3189 break;
3191 case UDIV:
3192 if (arg1 == 0
3193 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3194 && arg1s == -1))
3195 return 0;
3196 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3197 break;
3199 case UMOD:
3200 if (arg1 == 0
3201 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3202 && arg1s == -1))
3203 return 0;
3204 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3205 break;
3207 case AND:
3208 val = arg0 & arg1;
3209 break;
3211 case IOR:
3212 val = arg0 | arg1;
3213 break;
3215 case XOR:
3216 val = arg0 ^ arg1;
3217 break;
3219 case LSHIFTRT:
3220 case ASHIFT:
3221 case ASHIFTRT:
3222 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3223 the value is in range. We can't return any old value for
3224 out-of-range arguments because either the middle-end (via
3225 shift_truncation_mask) or the back-end might be relying on
3226 target-specific knowledge. Nor can we rely on
3227 shift_truncation_mask, since the shift might not be part of an
3228 ashlM3, lshrM3 or ashrM3 instruction. */
3229 if (SHIFT_COUNT_TRUNCATED)
3230 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3231 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3232 return 0;
3234 val = (code == ASHIFT
3235 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3236 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3238 /* Sign-extend the result for arithmetic right shifts. */
3239 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3240 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
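	  /* Sketch (assuming width == 8 and arg1 == 2): 0x80 is
	     arg0s == -128, and -128 >> 2 must be -32 == 0xe0; the
	     unsigned shift yields 0x20, and the OR above refills the
	     two vacated sign bits.  */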
3241 break;
3243 case ROTATERT:
3244 if (arg1 < 0)
3245 return 0;
3247 arg1 %= width;
3248 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3249 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3250 break;
3252 case ROTATE:
3253 if (arg1 < 0)
3254 return 0;
3256 arg1 %= width;
3257 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3258 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3259 break;
3261 case COMPARE:
3262 /* Do nothing here. */
3263 return 0;
3265 case SMIN:
3266 val = arg0s <= arg1s ? arg0s : arg1s;
3267 break;
3269 case UMIN:
3270 val = ((unsigned HOST_WIDE_INT) arg0
3271 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3272 break;
3274 case SMAX:
3275 val = arg0s > arg1s ? arg0s : arg1s;
3276 break;
3278 case UMAX:
3279 val = ((unsigned HOST_WIDE_INT) arg0
3280 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3281 break;
3283 case SS_PLUS:
3284 case US_PLUS:
3285 case SS_MINUS:
3286 case US_MINUS:
3287 case SS_ASHIFT:
3288 /* ??? There are simplifications that can be done. */
3289 return 0;
3291 default:
3292 gcc_unreachable ();
3295 return gen_int_mode (val, mode);
3298 return NULL_RTX;
3303 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3304 PLUS or MINUS.
3306 Rather than test for specific cases, we do this by a brute-force method
3307 and do all possible simplifications until no more changes occur. Then
3308 we rebuild the operation. */
3310 struct simplify_plus_minus_op_data
3312 rtx op;
3313 short neg;
3316 static int
3317 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3319 const struct simplify_plus_minus_op_data *d1 = p1;
3320 const struct simplify_plus_minus_op_data *d2 = p2;
3321 int result;
3323 result = (commutative_operand_precedence (d2->op)
3324 - commutative_operand_precedence (d1->op));
3325 if (result)
3326 return result;
3328 /* Group together equal REGs to do more simplification. */
3329 if (REG_P (d1->op) && REG_P (d2->op))
3330 return REGNO (d1->op) - REGNO (d2->op);
3331 else
3332 return 0;
3335 static rtx
3336 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3337 rtx op1)
3339 struct simplify_plus_minus_op_data ops[8];
3340 rtx result, tem;
3341 int n_ops = 2, input_ops = 2;
3342 int changed, n_constants = 0, canonicalized = 0;
3343 int i, j;
3345 memset (ops, 0, sizeof ops);
3347 /* Set up the two operands and then expand them until nothing has been
3348 changed. If we run out of room in our array, give up; this should
3349 almost never happen. */
3351 ops[0].op = op0;
3352 ops[0].neg = 0;
3353 ops[1].op = op1;
3354 ops[1].neg = (code == MINUS);
3358 changed = 0;
3360 for (i = 0; i < n_ops; i++)
3362 rtx this_op = ops[i].op;
3363 int this_neg = ops[i].neg;
3364 enum rtx_code this_code = GET_CODE (this_op);
3366 switch (this_code)
3368 case PLUS:
3369 case MINUS:
3370 if (n_ops == 7)
3371 return NULL_RTX;
3373 ops[n_ops].op = XEXP (this_op, 1);
3374 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3375 n_ops++;
3377 ops[i].op = XEXP (this_op, 0);
3378 input_ops++;
3379 changed = 1;
3380 canonicalized |= this_neg;
3381 break;
3383 case NEG:
3384 ops[i].op = XEXP (this_op, 0);
3385 ops[i].neg = ! this_neg;
3386 changed = 1;
3387 canonicalized = 1;
3388 break;
3390 case CONST:
3391 if (n_ops < 7
3392 && GET_CODE (XEXP (this_op, 0)) == PLUS
3393 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3394 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3396 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3397 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3398 ops[n_ops].neg = this_neg;
3399 n_ops++;
3400 changed = 1;
3401 canonicalized = 1;
3403 break;
3405 case NOT:
3406 /* ~a -> (-a - 1) */
3407 if (n_ops != 7)
3409 ops[n_ops].op = constm1_rtx;
3410 ops[n_ops++].neg = this_neg;
3411 ops[i].op = XEXP (this_op, 0);
3412 ops[i].neg = !this_neg;
3413 changed = 1;
3414 canonicalized = 1;
3416 break;
3418 case CONST_INT:
3419 n_constants++;
3420 if (this_neg)
3422 ops[i].op = neg_const_int (mode, this_op);
3423 ops[i].neg = 0;
3424 changed = 1;
3425 canonicalized = 1;
3427 break;
3429 default:
3430 break;
3434 while (changed);
3436 if (n_constants > 1)
3437 canonicalized = 1;
3439 gcc_assert (n_ops >= 2);
3441 /* If we only have two operands, we can avoid the loops. */
3442 if (n_ops == 2)
3444 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3445 rtx lhs, rhs;
3447 /* Get the two operands. Be careful with the order, especially for
3448 the cases where code == MINUS. */
3449 if (ops[0].neg && ops[1].neg)
3451 lhs = gen_rtx_NEG (mode, ops[0].op);
3452 rhs = ops[1].op;
3454 else if (ops[0].neg)
3456 lhs = ops[1].op;
3457 rhs = ops[0].op;
3459 else
3461 lhs = ops[0].op;
3462 rhs = ops[1].op;
3465 return simplify_const_binary_operation (code, mode, lhs, rhs);
3468 /* Now simplify each pair of operands until nothing changes. */
3471 /* Insertion sort is good enough for an eight-element array. */
3472 for (i = 1; i < n_ops; i++)
3474 struct simplify_plus_minus_op_data save;
3475 j = i - 1;
3476 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3477 continue;
3479 canonicalized = 1;
3480 save = ops[i];
3482 ops[j + 1] = ops[j];
3483 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3484 ops[j + 1] = save;
3487 /* This is only useful the first time through. */
3488 if (!canonicalized)
3489 return NULL_RTX;
3491 changed = 0;
3492 for (i = n_ops - 1; i > 0; i--)
3493 for (j = i - 1; j >= 0; j--)
3495 rtx lhs = ops[j].op, rhs = ops[i].op;
3496 int lneg = ops[j].neg, rneg = ops[i].neg;
3498 if (lhs != 0 && rhs != 0)
3500 enum rtx_code ncode = PLUS;
3502 if (lneg != rneg)
3504 ncode = MINUS;
3505 if (lneg)
3506 tem = lhs, lhs = rhs, rhs = tem;
3508 else if (swap_commutative_operands_p (lhs, rhs))
3509 tem = lhs, lhs = rhs, rhs = tem;
3511 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3512 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3514 rtx tem_lhs, tem_rhs;
3516 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3517 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3518 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3520 if (tem && !CONSTANT_P (tem))
3521 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3523 else
3524 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3526 /* Reject "simplifications" that just wrap the two
3527 arguments in a CONST. Failure to do so can result
3528 in infinite recursion with simplify_binary_operation
3529 when it calls us to simplify CONST operations. */
3530 if (tem
3531 && ! (GET_CODE (tem) == CONST
3532 && GET_CODE (XEXP (tem, 0)) == ncode
3533 && XEXP (XEXP (tem, 0), 0) == lhs
3534 && XEXP (XEXP (tem, 0), 1) == rhs))
3536 lneg &= rneg;
3537 if (GET_CODE (tem) == NEG)
3538 tem = XEXP (tem, 0), lneg = !lneg;
3539 if (GET_CODE (tem) == CONST_INT && lneg)
3540 tem = neg_const_int (mode, tem), lneg = 0;
3542 ops[i].op = tem;
3543 ops[i].neg = lneg;
3544 ops[j].op = NULL_RTX;
3545 changed = 1;
3550 /* Pack all the operands to the lower-numbered entries. */
3551 for (i = 0, j = 0; j < n_ops; j++)
3552 if (ops[j].op)
3554 ops[i] = ops[j];
3555 i++;
3557 n_ops = i;
3559 while (changed);
3561 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3562 if (n_ops == 2
3563 && GET_CODE (ops[1].op) == CONST_INT
3564 && CONSTANT_P (ops[0].op)
3565 && ops[0].neg)
3566 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3568 /* We suppressed creation of trivial CONST expressions in the
3569 combination loop to avoid recursion. Create one manually now.
3570 The combination loop should have ensured that there is exactly
3571 one CONST_INT, and the sort will have ensured that it is last
3572 in the array and that any other constant will be next-to-last. */
3574 if (n_ops > 1
3575 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3576 && CONSTANT_P (ops[n_ops - 2].op))
3578 rtx value = ops[n_ops - 1].op;
3579 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3580 value = neg_const_int (mode, value);
3581 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3582 n_ops--;
3585 /* Put a non-negated operand first, if possible. */
3587 for (i = 0; i < n_ops && ops[i].neg; i++)
3588 continue;
3589 if (i == n_ops)
3590 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3591 else if (i != 0)
3593 tem = ops[0].op;
3594 ops[0] = ops[i];
3595 ops[i].op = tem;
3596 ops[i].neg = 1;
3599 /* Now make the result by performing the requested operations. */
3600 result = ops[0].op;
3601 for (i = 1; i < n_ops; i++)
3602 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3603 mode, result, ops[i].op);
3605 return result;
3608 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3609 static bool
3610 plus_minus_operand_p (rtx x)
3612 return GET_CODE (x) == PLUS
3613 || GET_CODE (x) == MINUS
3614 || (GET_CODE (x) == CONST
3615 && GET_CODE (XEXP (x, 0)) == PLUS
3616 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3617 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3620 /* Like simplify_binary_operation except used for relational operators.
3621 MODE is the mode of the result. If MODE is VOIDmode, the operands
3622 must not both be VOIDmode.
3624 CMP_MODE specifies the mode in which the comparison is done, so it is
3625 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3626 the operands or, if both are VOIDmode, the operands are compared in
3627 "infinite precision". */
3629 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3630 enum machine_mode cmp_mode, rtx op0, rtx op1)
3632 rtx tem, trueop0, trueop1;
3634 if (cmp_mode == VOIDmode)
3635 cmp_mode = GET_MODE (op0);
3636 if (cmp_mode == VOIDmode)
3637 cmp_mode = GET_MODE (op1);
3639 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3640 if (tem)
3642 if (SCALAR_FLOAT_MODE_P (mode))
3644 if (tem == const0_rtx)
3645 return CONST0_RTX (mode);
3646 #ifdef FLOAT_STORE_FLAG_VALUE
3648 REAL_VALUE_TYPE val;
3649 val = FLOAT_STORE_FLAG_VALUE (mode);
3650 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3652 #else
3653 return NULL_RTX;
3654 #endif
3656 if (VECTOR_MODE_P (mode))
3658 if (tem == const0_rtx)
3659 return CONST0_RTX (mode);
3660 #ifdef VECTOR_STORE_FLAG_VALUE
3662 int i, units;
3663 rtvec v;
3665 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3666 if (val == NULL_RTX)
3667 return NULL_RTX;
3668 if (val == const1_rtx)
3669 return CONST1_RTX (mode);
3671 units = GET_MODE_NUNITS (mode);
3672 v = rtvec_alloc (units);
3673 for (i = 0; i < units; i++)
3674 RTVEC_ELT (v, i) = val;
3675 return gen_rtx_raw_CONST_VECTOR (mode, v);
3677 #else
3678 return NULL_RTX;
3679 #endif
3682 return tem;
3685 /* For the following tests, ensure const0_rtx is op1. */
3686 if (swap_commutative_operands_p (op0, op1)
3687 || (op0 == const0_rtx && op1 != const0_rtx))
3688 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3690 /* If op0 is a compare, extract the comparison arguments from it. */
3691 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3692 return simplify_relational_operation (code, mode, VOIDmode,
3693 XEXP (op0, 0), XEXP (op0, 1));
3695 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3696 || CC0_P (op0))
3697 return NULL_RTX;
3699 trueop0 = avoid_constant_pool_reference (op0);
3700 trueop1 = avoid_constant_pool_reference (op1);
3701 return simplify_relational_operation_1 (code, mode, cmp_mode,
3702 trueop0, trueop1);
3705 /* This part of simplify_relational_operation is only used when CMP_MODE
3706 is not in class MODE_CC (i.e. it is a real comparison).
3708 MODE is the mode of the result, while CMP_MODE specifies the mode
3709 in which the comparison is done, so it is the mode of the operands. */
3711 static rtx
3712 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3713 enum machine_mode cmp_mode, rtx op0, rtx op1)
3715 enum rtx_code op0code = GET_CODE (op0);
3717 if (op1 == const0_rtx && COMPARISON_P (op0))
3719 /* If op0 is a comparison, extract the comparison arguments
3720 from it. */
3721 if (code == NE)
3723 if (GET_MODE (op0) == mode)
3724 return simplify_rtx (op0);
3725 else
3726 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3727 XEXP (op0, 0), XEXP (op0, 1));
3729 else if (code == EQ)
3731 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3732 if (new_code != UNKNOWN)
3733 return simplify_gen_relational (new_code, mode, VOIDmode,
3734 XEXP (op0, 0), XEXP (op0, 1));
3738 if (op1 == const0_rtx)
3740 /* Canonicalize (GTU x 0) as (NE x 0). */
3741 if (code == GTU)
3742 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3743 /* Canonicalize (LEU x 0) as (EQ x 0). */
3744 if (code == LEU)
3745 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3747 else if (op1 == const1_rtx)
3749 switch (code)
3751 case GE:
3752 /* Canonicalize (GE x 1) as (GT x 0). */
3753 return simplify_gen_relational (GT, mode, cmp_mode,
3754 op0, const0_rtx);
3755 case GEU:
3756 /* Canonicalize (GEU x 1) as (NE x 0). */
3757 return simplify_gen_relational (NE, mode, cmp_mode,
3758 op0, const0_rtx);
3759 case LT:
3760 /* Canonicalize (LT x 1) as (LE x 0). */
3761 return simplify_gen_relational (LE, mode, cmp_mode,
3762 op0, const0_rtx);
3763 case LTU:
3764 /* Canonicalize (LTU x 1) as (EQ x 0). */
3765 return simplify_gen_relational (EQ, mode, cmp_mode,
3766 op0, const0_rtx);
3767 default:
3768 break;
3771 else if (op1 == constm1_rtx)
3773 /* Canonicalize (LE x -1) as (LT x 0). */
3774 if (code == LE)
3775 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3776 /* Canonicalize (GT x -1) as (GE x 0). */
3777 if (code == GT)
3778 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3781 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3782 if ((code == EQ || code == NE)
3783 && (op0code == PLUS || op0code == MINUS)
3784 && CONSTANT_P (op1)
3785 && CONSTANT_P (XEXP (op0, 1))
3786 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3788 rtx x = XEXP (op0, 0);
3789 rtx c = XEXP (op0, 1);
3791 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3792 cmp_mode, op1, c);
3793 return simplify_gen_relational (code, mode, cmp_mode, x, c);
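  /* For example, in an integer mode (eq (plus x 3) 10) becomes
     (eq x 7); in C terms

	 assert ((x + 3 == 10) == (x == 10 - 3));

     holds for every x because modular addition is invertible.  */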
3796 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3797 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3798 if (code == NE
3799 && op1 == const0_rtx
3800 && GET_MODE_CLASS (mode) == MODE_INT
3801 && cmp_mode != VOIDmode
3802 /* ??? Work-around BImode bugs in the ia64 backend. */
3803 && mode != BImode
3804 && cmp_mode != BImode
3805 && nonzero_bits (op0, cmp_mode) == 1
3806 && STORE_FLAG_VALUE == 1)
3807 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3808 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3809 : lowpart_subreg (mode, op0, cmp_mode);
3811 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3812 if ((code == EQ || code == NE)
3813 && op1 == const0_rtx
3814 && op0code == XOR)
3815 return simplify_gen_relational (code, mode, cmp_mode,
3816 XEXP (op0, 0), XEXP (op0, 1));
3818 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3819 if ((code == EQ || code == NE)
3820 && op0code == XOR
3821 && rtx_equal_p (XEXP (op0, 0), op1)
3822 && !side_effects_p (XEXP (op0, 0)))
3823 return simplify_gen_relational (code, mode, cmp_mode,
3824 XEXP (op0, 1), const0_rtx);
3826 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3827 if ((code == EQ || code == NE)
3828 && op0code == XOR
3829 && rtx_equal_p (XEXP (op0, 1), op1)
3830 && !side_effects_p (XEXP (op0, 1)))
3831 return simplify_gen_relational (code, mode, cmp_mode,
3832 XEXP (op0, 0), const0_rtx);
3834 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3835 if ((code == EQ || code == NE)
3836 && op0code == XOR
3837 && (GET_CODE (op1) == CONST_INT
3838 || GET_CODE (op1) == CONST_DOUBLE)
3839 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3840 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3841 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3842 simplify_gen_binary (XOR, cmp_mode,
3843 XEXP (op0, 1), op1));
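  /* Likewise XOR is its own inverse, so e.g. (eq (xor x 5) 12)
     becomes (eq x 9); in C terms

	 assert (((x ^ 5) == 12) == (x == (5 ^ 12)));  */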
3845 if (op0code == POPCOUNT && op1 == const0_rtx)
3846 switch (code)
3848 case EQ:
3849 case LE:
3850 case LEU:
3851 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3852 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3853 XEXP (op0, 0), const0_rtx);
3855 case NE:
3856 case GT:
3857 case GTU:
3858 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3859 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3860 XEXP (op0, 0), const0_rtx);
3862 default:
3863 break;
3866 return NULL_RTX;
3869 /* Check if the given comparison (done in the given MODE) is actually a
3870 tautology or a contradiction.
3871 If no simplification is possible, this function returns zero.
3872 Otherwise, it returns either const_true_rtx or const0_rtx. */
3875 simplify_const_relational_operation (enum rtx_code code,
3876 enum machine_mode mode,
3877 rtx op0, rtx op1)
3879 int equal, op0lt, op0ltu, op1lt, op1ltu;
3880 rtx tem;
3881 rtx trueop0;
3882 rtx trueop1;
3884 gcc_assert (mode != VOIDmode
3885 || (GET_MODE (op0) == VOIDmode
3886 && GET_MODE (op1) == VOIDmode));
3888 /* If op0 is a compare, extract the comparison arguments from it. */
3889 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3891 op1 = XEXP (op0, 1);
3892 op0 = XEXP (op0, 0);
3894 if (GET_MODE (op0) != VOIDmode)
3895 mode = GET_MODE (op0);
3896 else if (GET_MODE (op1) != VOIDmode)
3897 mode = GET_MODE (op1);
3898 else
3899 return 0;
3902 /* We can't simplify MODE_CC values since we don't know what the
3903 actual comparison is. */
3904 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3905 return 0;
3907 /* Make sure the constant is second. */
3908 if (swap_commutative_operands_p (op0, op1))
3910 tem = op0, op0 = op1, op1 = tem;
3911 code = swap_condition (code);
3914 trueop0 = avoid_constant_pool_reference (op0);
3915 trueop1 = avoid_constant_pool_reference (op1);
3917 /* For integer comparisons of A and B maybe we can simplify A - B and can
3918 then simplify a comparison of that with zero. If A and B are both either
3919 a register or a CONST_INT, this can't help; testing for these cases will
3920 prevent infinite recursion here and speed things up.
3922 We can only do this for EQ and NE comparisons, as otherwise we may
3923 lose or introduce overflow that we cannot disregard as undefined,
3924 since we do not know the signedness of the operation on either the
3925 left or the right hand side of the comparison. */
3927 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3928 && (code == EQ || code == NE)
3929 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3930 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3931 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3932 /* We cannot do this if tem is a nonzero address. */
3933 && ! nonzero_address_p (tem))
3934 return simplify_const_relational_operation (signed_condition (code),
3935 mode, tem, const0_rtx);
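/* To illustrate the subtraction trick above (register number arbitrary):
comparing (plus:SI (reg:SI 60) (const_int 1)) with (reg:SI 60) under EQ
first folds the MINUS to (const_int 1), and the recursive call on
(eq (const_int 1) (const_int 0)) then yields const0_rtx. */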
3937 if (! HONOR_NANS (mode) && code == ORDERED)
3938 return const_true_rtx;
3940 if (! HONOR_NANS (mode) && code == UNORDERED)
3941 return const0_rtx;
3943 /* For modes without NaNs, if the two operands are equal, we know the
3944 result except if they have side-effects. */
3945 if (! HONOR_NANS (GET_MODE (trueop0))
3946 && rtx_equal_p (trueop0, trueop1)
3947 && ! side_effects_p (trueop0))
3948 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3950 /* If the operands are floating-point constants, see if we can fold
3951 the result. */
3952 else if (GET_CODE (trueop0) == CONST_DOUBLE
3953 && GET_CODE (trueop1) == CONST_DOUBLE
3954 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3956 REAL_VALUE_TYPE d0, d1;
3958 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3959 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3961 /* Comparisons are unordered iff at least one of the values is NaN. */
3962 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3963 switch (code)
3965 case UNEQ:
3966 case UNLT:
3967 case UNGT:
3968 case UNLE:
3969 case UNGE:
3970 case NE:
3971 case UNORDERED:
3972 return const_true_rtx;
3973 case EQ:
3974 case LT:
3975 case GT:
3976 case LE:
3977 case GE:
3978 case LTGT:
3979 case ORDERED:
3980 return const0_rtx;
3981 default:
3982 return 0;
3985 equal = REAL_VALUES_EQUAL (d0, d1);
3986 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3987 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3990 /* Otherwise, see if the operands are both integers. */
3991 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3992 && (GET_CODE (trueop0) == CONST_DOUBLE
3993 || GET_CODE (trueop0) == CONST_INT)
3994 && (GET_CODE (trueop1) == CONST_DOUBLE
3995 || GET_CODE (trueop1) == CONST_INT))
3997 int width = GET_MODE_BITSIZE (mode);
3998 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3999 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4001 /* Get the two words comprising each integer constant. */
4002 if (GET_CODE (trueop0) == CONST_DOUBLE)
4004 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4005 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4007 else
4009 l0u = l0s = INTVAL (trueop0);
4010 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4013 if (GET_CODE (trueop1) == CONST_DOUBLE)
4015 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4016 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4018 else
4020 l1u = l1s = INTVAL (trueop1);
4021 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4024 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4025 we have to sign or zero-extend the values. */
4026 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4028 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4029 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4031 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4032 l0s |= ((HOST_WIDE_INT) (-1) << width);
4034 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4035 l1s |= ((HOST_WIDE_INT) (-1) << width);
4037 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4038 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4040 equal = (h0u == h1u && l0u == l1u);
4041 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4042 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4043 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4044 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
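/* A sketch of the signed/unsigned split above: comparing (const_int -1)
with (const_int 1) in SImode sign-extends -1, so h0s == -1 < h1s == 0
and LT folds to const_true_rtx; viewed as unsigned, l0u == 0xffffffff
exceeds l1u == 1, so LTU folds to const0_rtx. */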
4047 /* Otherwise, there are some code-specific tests we can make. */
4048 else
4050 /* Optimize comparisons with upper and lower bounds. */
4051 if (SCALAR_INT_MODE_P (mode)
4052 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4054 rtx mmin, mmax;
4055 int sign;
4057 if (code == GEU
4058 || code == LEU
4059 || code == GTU
4060 || code == LTU)
4061 sign = 0;
4062 else
4063 sign = 1;
4065 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4067 tem = NULL_RTX;
4068 switch (code)
4070 case GEU:
4071 case GE:
4072 /* x >= min is always true. */
4073 if (rtx_equal_p (trueop1, mmin))
4074 tem = const_true_rtx;
4075 break;
4078 case LEU:
4079 case LE:
4080 /* x <= max is always true. */
4081 if (rtx_equal_p (trueop1, mmax))
4082 tem = const_true_rtx;
4083 break;
4085 case GTU:
4086 case GT:
4087 /* x > max is always false. */
4088 if (rtx_equal_p (trueop1, mmax))
4089 tem = const0_rtx;
4090 break;
4092 case LTU:
4093 case LT:
4094 /* x < min is always false. */
4095 if (rtx_equal_p (trueop1, mmin))
4096 tem = const0_rtx;
4097 break;
4099 default:
4100 break;
4102 if (tem == const0_rtx
4103 || tem == const_true_rtx)
4104 return tem;
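/* Two concrete instances of the bounds test above: in QImode,
(gtu x (const_int 255)) compares against the unsigned maximum and folds
to const0_rtx, while (geu x (const_int 0)) compares against the
unsigned minimum and folds to const_true_rtx. */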
4107 switch (code)
4109 case EQ:
4110 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4111 return const0_rtx;
4112 break;
4114 case NE:
4115 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4116 return const_true_rtx;
4117 break;
4119 case LT:
4120 /* Optimize abs(x) < 0.0. */
4121 if (trueop1 == CONST0_RTX (mode)
4122 && !HONOR_SNANS (mode)
4123 && (!INTEGRAL_MODE_P (mode)
4124 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4126 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4127 : trueop0;
4128 if (GET_CODE (tem) == ABS)
4130 if (INTEGRAL_MODE_P (mode)
4131 && (issue_strict_overflow_warning
4132 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4133 warning (OPT_Wstrict_overflow,
4134 ("assuming signed overflow does not occur when "
4135 "assuming abs (x) < 0 is false"));
4136 return const0_rtx;
4140 /* Optimize popcount (x) < 0; popcount is never negative. */
4141 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4142 return const0_rtx;
4143 break;
4145 case GE:
4146 /* Optimize abs(x) >= 0.0. */
4147 if (trueop1 == CONST0_RTX (mode)
4148 && !HONOR_NANS (mode)
4149 && (!INTEGRAL_MODE_P (mode)
4150 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4152 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4153 : trueop0;
4154 if (GET_CODE (tem) == ABS)
4156 if (INTEGRAL_MODE_P (mode)
4157 && (issue_strict_overflow_warning
4158 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4159 warning (OPT_Wstrict_overflow,
4160 ("assuming signed overflow does not occur when "
4161 "assuming abs (x) >= 0 is true"));
4162 return const_true_rtx;
4166 /* Optimize popcount (x) >= 0. */
4167 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4168 return const_true_rtx;
4169 break;
4171 case UNGE:
4172 /* Optimize ! (abs(x) < 0.0). */
4173 if (trueop1 == CONST0_RTX (mode))
4175 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4176 : trueop0;
4177 if (GET_CODE (tem) == ABS)
4178 return const_true_rtx;
4180 break;
4182 default:
4183 break;
4186 return 0;
4189 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4190 as appropriate. */
4191 switch (code)
4193 case EQ:
4194 case UNEQ:
4195 return equal ? const_true_rtx : const0_rtx;
4196 case NE:
4197 case LTGT:
4198 return ! equal ? const_true_rtx : const0_rtx;
4199 case LT:
4200 case UNLT:
4201 return op0lt ? const_true_rtx : const0_rtx;
4202 case GT:
4203 case UNGT:
4204 return op1lt ? const_true_rtx : const0_rtx;
4205 case LTU:
4206 return op0ltu ? const_true_rtx : const0_rtx;
4207 case GTU:
4208 return op1ltu ? const_true_rtx : const0_rtx;
4209 case LE:
4210 case UNLE:
4211 return equal || op0lt ? const_true_rtx : const0_rtx;
4212 case GE:
4213 case UNGE:
4214 return equal || op1lt ? const_true_rtx : const0_rtx;
4215 case LEU:
4216 return equal || op0ltu ? const_true_rtx : const0_rtx;
4217 case GEU:
4218 return equal || op1ltu ? const_true_rtx : const0_rtx;
4219 case ORDERED:
4220 return const_true_rtx;
4221 case UNORDERED:
4222 return const0_rtx;
4223 default:
4224 gcc_unreachable ();
4228 /* Simplify CODE, an operation with result mode MODE and three operands,
4229 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4230 a constant. Return 0 if no simplification is possible. */
4232 rtx
4233 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4234 enum machine_mode op0_mode, rtx op0, rtx op1,
4235 rtx op2)
4237 unsigned int width = GET_MODE_BITSIZE (mode);
4239 /* VOIDmode means "infinite" precision. */
4240 if (width == 0)
4241 width = HOST_BITS_PER_WIDE_INT;
4243 switch (code)
4245 case SIGN_EXTRACT:
4246 case ZERO_EXTRACT:
4247 if (GET_CODE (op0) == CONST_INT
4248 && GET_CODE (op1) == CONST_INT
4249 && GET_CODE (op2) == CONST_INT
4250 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4251 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4253 /* Extracting a bit-field from a constant. */
4254 HOST_WIDE_INT val = INTVAL (op0);
4256 if (BITS_BIG_ENDIAN)
4257 val >>= (GET_MODE_BITSIZE (op0_mode)
4258 - INTVAL (op2) - INTVAL (op1));
4259 else
4260 val >>= INTVAL (op2);
4262 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4264 /* First zero-extend. */
4265 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4266 /* If desired, propagate sign bit. */
4267 if (code == SIGN_EXTRACT
4268 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4269 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4272 /* Clear the bits that don't belong in our mode,
4273 unless they and our sign bit are all one.
4274 So we get either a reasonable negative value or a reasonable
4275 unsigned value for this mode. */
4276 if (width < HOST_BITS_PER_WIDE_INT
4277 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4278 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4279 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4281 return gen_int_mode (val, mode);
4283 break;
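/* A worked example of the extraction above, assuming BITS_BIG_ENDIAN
is 0: (zero_extract:SI (const_int 0x123456) (const_int 8) (const_int 4))
shifts right by the position, 0x123456 >> 4 == 0x12345, masks down to
8 bits, and yields (const_int 0x45). */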
4285 case IF_THEN_ELSE:
4286 if (GET_CODE (op0) == CONST_INT)
4287 return op0 != const0_rtx ? op1 : op2;
4289 /* Convert c ? a : a into "a". */
4290 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4291 return op1;
4293 /* Convert a != b ? a : b into "a". */
4294 if (GET_CODE (op0) == NE
4295 && ! side_effects_p (op0)
4296 && ! HONOR_NANS (mode)
4297 && ! HONOR_SIGNED_ZEROS (mode)
4298 && ((rtx_equal_p (XEXP (op0, 0), op1)
4299 && rtx_equal_p (XEXP (op0, 1), op2))
4300 || (rtx_equal_p (XEXP (op0, 0), op2)
4301 && rtx_equal_p (XEXP (op0, 1), op1))))
4302 return op1;
4304 /* Convert a == b ? a : b into "b". */
4305 if (GET_CODE (op0) == EQ
4306 && ! side_effects_p (op0)
4307 && ! HONOR_NANS (mode)
4308 && ! HONOR_SIGNED_ZEROS (mode)
4309 && ((rtx_equal_p (XEXP (op0, 0), op1)
4310 && rtx_equal_p (XEXP (op0, 1), op2))
4311 || (rtx_equal_p (XEXP (op0, 0), op2)
4312 && rtx_equal_p (XEXP (op0, 1), op1))))
4313 return op2;
4315 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4317 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4318 ? GET_MODE (XEXP (op0, 1))
4319 : GET_MODE (XEXP (op0, 0)));
4320 rtx temp;
4322 /* Look for happy constants in op1 and op2. */
4323 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4325 HOST_WIDE_INT t = INTVAL (op1);
4326 HOST_WIDE_INT f = INTVAL (op2);
4328 if (t == STORE_FLAG_VALUE && f == 0)
4329 code = GET_CODE (op0);
4330 else if (t == 0 && f == STORE_FLAG_VALUE)
4332 enum rtx_code tmp;
4333 tmp = reversed_comparison_code (op0, NULL_RTX);
4334 if (tmp == UNKNOWN)
4335 break;
4336 code = tmp;
4338 else
4339 break;
4341 return simplify_gen_relational (code, mode, cmp_mode,
4342 XEXP (op0, 0), XEXP (op0, 1));
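/* For instance, on a target where STORE_FLAG_VALUE is 1, the expression
(if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to
(lt x y), and with the arms swapped the reversed integer comparison
(ge x y) is generated instead. */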
4345 if (cmp_mode == VOIDmode)
4346 cmp_mode = op0_mode;
4347 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4348 cmp_mode, XEXP (op0, 0),
4349 XEXP (op0, 1));
4351 /* See if any simplifications were possible. */
4352 if (temp)
4354 if (GET_CODE (temp) == CONST_INT)
4355 return temp == const0_rtx ? op2 : op1;
4356 else
4357 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4360 break;
4362 case VEC_MERGE:
4363 gcc_assert (GET_MODE (op0) == mode);
4364 gcc_assert (GET_MODE (op1) == mode);
4365 gcc_assert (VECTOR_MODE_P (mode));
4366 op2 = avoid_constant_pool_reference (op2);
4367 if (GET_CODE (op2) == CONST_INT)
4369 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4370 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4371 int mask = (1 << n_elts) - 1;
4373 if (!(INTVAL (op2) & mask))
4374 return op1;
4375 if ((INTVAL (op2) & mask) == mask)
4376 return op0;
4378 op0 = avoid_constant_pool_reference (op0);
4379 op1 = avoid_constant_pool_reference (op1);
4380 if (GET_CODE (op0) == CONST_VECTOR
4381 && GET_CODE (op1) == CONST_VECTOR)
4383 rtvec v = rtvec_alloc (n_elts);
4384 unsigned int i;
4386 for (i = 0; i < n_elts; i++)
4387 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4388 ? CONST_VECTOR_ELT (op0, i)
4389 : CONST_VECTOR_ELT (op1, i));
4390 return gen_rtx_CONST_VECTOR (mode, v);
4393 break;
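/* As an illustration of the mask handling above: merging two V4SI
constants with op2 == (const_int 5), binary 0101, takes elements 0 and
2 from op0 and elements 1 and 3 from op1; masks of 0 or 0b1111 select
op1 or op0 outright. */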
4395 default:
4396 gcc_unreachable ();
4399 return 0;
4402 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4403 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4405 Works by unpacking OP into a collection of 8-bit values
4406 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4407 and then repacking them again for OUTERMODE. */
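/* For example, (subreg:QI (const_int 0x12345678) 0) taken from SImode
yields the low-order byte, (const_int 0x78), on a little-endian target;
on a big-endian target byte 0 holds the most significant byte,
giving (const_int 0x12). */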
4409 static rtx
4410 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4411 enum machine_mode innermode, unsigned int byte)
4413 /* We support up to 512-bit values (for V8DFmode). */
4414 enum {
4415 max_bitsize = 512,
4416 value_bit = 8,
4417 value_mask = (1 << value_bit) - 1
4419 unsigned char value[max_bitsize / value_bit];
4420 int value_start;
4421 int i;
4422 int elem;
4424 int num_elem;
4425 rtx * elems;
4426 int elem_bitsize;
4427 rtx result_s;
4428 rtvec result_v = NULL;
4429 enum mode_class outer_class;
4430 enum machine_mode outer_submode;
4432 /* Some ports misuse CCmode. */
4433 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4434 return op;
4436 /* We have no way to represent a complex constant at the rtl level. */
4437 if (COMPLEX_MODE_P (outermode))
4438 return NULL_RTX;
4440 /* Unpack the value. */
4442 if (GET_CODE (op) == CONST_VECTOR)
4444 num_elem = CONST_VECTOR_NUNITS (op);
4445 elems = &CONST_VECTOR_ELT (op, 0);
4446 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4448 else
4450 num_elem = 1;
4451 elems = &op;
4452 elem_bitsize = max_bitsize;
4454 /* If this asserts, it is too complicated; reducing value_bit may help. */
4455 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4456 /* I don't know how to handle endianness of sub-units. */
4457 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4459 for (elem = 0; elem < num_elem; elem++)
4461 unsigned char * vp;
4462 rtx el = elems[elem];
4464 /* Vectors are kept in target memory order. (This is probably
4465 a mistake.) */
4467 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4468 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4469 / BITS_PER_UNIT);
4470 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4471 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4472 unsigned bytele = (subword_byte % UNITS_PER_WORD
4473 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4474 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4477 switch (GET_CODE (el))
4479 case CONST_INT:
4480 for (i = 0;
4481 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4482 i += value_bit)
4483 *vp++ = INTVAL (el) >> i;
4484 /* CONST_INTs are always logically sign-extended. */
4485 for (; i < elem_bitsize; i += value_bit)
4486 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4487 break;
4489 case CONST_DOUBLE:
4490 if (GET_MODE (el) == VOIDmode)
4492 /* If this triggers, someone should have generated a
4493 CONST_INT instead. */
4494 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4496 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4497 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4498 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4500 *vp++
4501 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4502 i += value_bit;
4504 /* It shouldn't matter what's done here, so fill it with
4505 zero. */
4506 for (; i < elem_bitsize; i += value_bit)
4507 *vp++ = 0;
4509 else
4511 long tmp[max_bitsize / 32];
4512 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4514 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4515 gcc_assert (bitsize <= elem_bitsize);
4516 gcc_assert (bitsize % value_bit == 0);
4518 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4519 GET_MODE (el));
4521 /* real_to_target produces its result in words affected by
4522 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4523 and use WORDS_BIG_ENDIAN instead; see the documentation
4524 of SUBREG in rtl.texi. */
4525 for (i = 0; i < bitsize; i += value_bit)
4527 int ibase;
4528 if (WORDS_BIG_ENDIAN)
4529 ibase = bitsize - 1 - i;
4530 else
4531 ibase = i;
4532 *vp++ = tmp[ibase / 32] >> i % 32;
4535 /* It shouldn't matter what's done here, so fill it with
4536 zero. */
4537 for (; i < elem_bitsize; i += value_bit)
4538 *vp++ = 0;
4540 break;
4542 default:
4543 gcc_unreachable ();
4547 /* Now, pick the right byte to start with. */
4548 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4549 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4550 will already have offset 0. */
4551 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4553 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4554 - byte);
4555 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4556 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4557 byte = (subword_byte % UNITS_PER_WORD
4558 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4561 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4562 so if it's become negative it will instead be very large.) */
4563 gcc_assert (byte < GET_MODE_SIZE (innermode));
4565 /* Convert from bytes to chunks of size value_bit. */
4566 value_start = byte * (BITS_PER_UNIT / value_bit);
4568 /* Re-pack the value. */
4570 if (VECTOR_MODE_P (outermode))
4572 num_elem = GET_MODE_NUNITS (outermode);
4573 result_v = rtvec_alloc (num_elem);
4574 elems = &RTVEC_ELT (result_v, 0);
4575 outer_submode = GET_MODE_INNER (outermode);
4577 else
4579 num_elem = 1;
4580 elems = &result_s;
4581 outer_submode = outermode;
4584 outer_class = GET_MODE_CLASS (outer_submode);
4585 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4587 gcc_assert (elem_bitsize % value_bit == 0);
4588 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4590 for (elem = 0; elem < num_elem; elem++)
4592 unsigned char *vp;
4594 /* Vectors are stored in target memory order. (This is probably
4595 a mistake.) */
4597 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4598 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4599 / BITS_PER_UNIT);
4600 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4601 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4602 unsigned bytele = (subword_byte % UNITS_PER_WORD
4603 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4604 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4607 switch (outer_class)
4609 case MODE_INT:
4610 case MODE_PARTIAL_INT:
4612 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4614 for (i = 0;
4615 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4616 i += value_bit)
4617 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4618 for (; i < elem_bitsize; i += value_bit)
4619 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4620 << (i - HOST_BITS_PER_WIDE_INT));
4622 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4623 know why. */
4624 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4625 elems[elem] = gen_int_mode (lo, outer_submode);
4626 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4627 elems[elem] = immed_double_const (lo, hi, outer_submode);
4628 else
4629 return NULL_RTX;
4631 break;
4633 case MODE_FLOAT:
4634 case MODE_DECIMAL_FLOAT:
4636 REAL_VALUE_TYPE r;
4637 long tmp[max_bitsize / 32];
4639 /* real_from_target wants its input in words affected by
4640 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4641 and use WORDS_BIG_ENDIAN instead; see the documentation
4642 of SUBREG in rtl.texi. */
4643 for (i = 0; i < max_bitsize / 32; i++)
4644 tmp[i] = 0;
4645 for (i = 0; i < elem_bitsize; i += value_bit)
4647 int ibase;
4648 if (WORDS_BIG_ENDIAN)
4649 ibase = elem_bitsize - 1 - i;
4650 else
4651 ibase = i;
4652 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4655 real_from_target (&r, tmp, outer_submode);
4656 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4658 break;
4660 default:
4661 gcc_unreachable ();
4664 if (VECTOR_MODE_P (outermode))
4665 return gen_rtx_CONST_VECTOR (outermode, result_v);
4666 else
4667 return result_s;
4670 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4671 Return 0 if no simplifications are possible. */
4672 rtx
4673 simplify_subreg (enum machine_mode outermode, rtx op,
4674 enum machine_mode innermode, unsigned int byte)
4676 /* Little bit of sanity checking. */
4677 gcc_assert (innermode != VOIDmode);
4678 gcc_assert (outermode != VOIDmode);
4679 gcc_assert (innermode != BLKmode);
4680 gcc_assert (outermode != BLKmode);
4682 gcc_assert (GET_MODE (op) == innermode
4683 || GET_MODE (op) == VOIDmode);
4685 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4686 gcc_assert (byte < GET_MODE_SIZE (innermode));
4688 if (outermode == innermode && !byte)
4689 return op;
4691 if (GET_CODE (op) == CONST_INT
4692 || GET_CODE (op) == CONST_DOUBLE
4693 || GET_CODE (op) == CONST_VECTOR)
4694 return simplify_immed_subreg (outermode, op, innermode, byte);
4696 /* Changing mode twice with SUBREG => just change it once,
4697 or not at all if changing back to the starting mode. */
4698 if (GET_CODE (op) == SUBREG)
4700 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4701 int final_offset = byte + SUBREG_BYTE (op);
4702 rtx newx;
4704 if (outermode == innermostmode
4705 && byte == 0 && SUBREG_BYTE (op) == 0)
4706 return SUBREG_REG (op);
4708 /* The SUBREG_BYTE represents the offset, as if the value were stored
4709 in memory. The irritating exception is a paradoxical subreg, where
4710 we define SUBREG_BYTE to be 0; on big-endian machines this value
4711 should be negative. For a moment, undo this exception. */
4712 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4714 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4715 if (WORDS_BIG_ENDIAN)
4716 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4717 if (BYTES_BIG_ENDIAN)
4718 final_offset += difference % UNITS_PER_WORD;
4720 if (SUBREG_BYTE (op) == 0
4721 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4723 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4724 if (WORDS_BIG_ENDIAN)
4725 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4726 if (BYTES_BIG_ENDIAN)
4727 final_offset += difference % UNITS_PER_WORD;
4730 /* See whether resulting subreg will be paradoxical. */
4731 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4733 /* In nonparadoxical subregs we can't handle negative offsets. */
4734 if (final_offset < 0)
4735 return NULL_RTX;
4736 /* Bail out in case resulting subreg would be incorrect. */
4737 if (final_offset % GET_MODE_SIZE (outermode)
4738 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4739 return NULL_RTX;
4741 else
4743 int offset = 0;
4744 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4746 /* In a paradoxical subreg, see if we are still looking at the lower
4747 part. If so, our SUBREG_BYTE will be 0. */
4748 if (WORDS_BIG_ENDIAN)
4749 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4750 if (BYTES_BIG_ENDIAN)
4751 offset += difference % UNITS_PER_WORD;
4752 if (offset == final_offset)
4753 final_offset = 0;
4754 else
4755 return NULL_RTX;
4758 /* Recurse for further possible simplifications. */
4759 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4760 final_offset);
4761 if (newx)
4762 return newx;
4763 if (validate_subreg (outermode, innermostmode,
4764 SUBREG_REG (op), final_offset))
4765 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4766 return NULL_RTX;
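/* For example, on a little-endian target the nested subreg
(subreg:QI (subreg:HI (reg:DI 60) 2) 1) has final_offset 2 + 1 == 3
and becomes (subreg:QI (reg:DI 60) 3) (register number arbitrary). */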
4769 /* Merge implicit and explicit truncations. */
4771 if (GET_CODE (op) == TRUNCATE
4772 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4773 && subreg_lowpart_offset (outermode, innermode) == byte)
4774 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4775 GET_MODE (XEXP (op, 0)));
4777 /* SUBREG of a hard register => just change the register number
4778 and/or mode. If the hard register is not valid in that mode,
4779 suppress this simplification. If the hard register is the stack,
4780 frame, or argument pointer, leave this as a SUBREG. */
4782 if (REG_P (op)
4783 && REGNO (op) < FIRST_PSEUDO_REGISTER
4784 #ifdef CANNOT_CHANGE_MODE_CLASS
4785 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4786 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4787 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4788 #endif
4789 && ((reload_completed && !frame_pointer_needed)
4790 || (REGNO (op) != FRAME_POINTER_REGNUM
4791 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4792 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4793 #endif
4795 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4796 && REGNO (op) != ARG_POINTER_REGNUM
4797 #endif
4798 && REGNO (op) != STACK_POINTER_REGNUM
4799 && subreg_offset_representable_p (REGNO (op), innermode,
4800 byte, outermode))
4802 unsigned int regno = REGNO (op);
4803 unsigned int final_regno
4804 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4806 /* ??? We do allow it if the current REG is not valid for
4807 its mode. This is a kludge to work around how float/complex
4808 arguments are passed on 32-bit SPARC and should be fixed. */
4809 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4810 || ! HARD_REGNO_MODE_OK (regno, innermode))
4812 rtx x;
4813 int final_offset = byte;
4815 /* Adjust offset for paradoxical subregs. */
4816 if (byte == 0
4817 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4819 int difference = (GET_MODE_SIZE (innermode)
4820 - GET_MODE_SIZE (outermode));
4821 if (WORDS_BIG_ENDIAN)
4822 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4823 if (BYTES_BIG_ENDIAN)
4824 final_offset += difference % UNITS_PER_WORD;
4827 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4829 /* Propagate the original regno. We don't have any way to specify
4830 the offset inside the original regno, so do so only for the lowpart.
4831 The information is used only by alias analysis, which cannot
4832 grok partial registers anyway. */
4834 if (subreg_lowpart_offset (outermode, innermode) == byte)
4835 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4836 return x;
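/* For instance, on a little-endian target with 32-bit hard registers,
(subreg:SI (reg:DI 2) 4) renumbers to (reg:SI 3), provided SImode is
valid in hard register 3 (register numbers illustrative). */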
4840 /* If we have a SUBREG of a register that we are replacing and we are
4841 replacing it with a MEM, make a new MEM and try replacing the
4842 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4843 or if we would be widening it. */
4845 if (MEM_P (op)
4846 && ! mode_dependent_address_p (XEXP (op, 0))
4847 /* Allow splitting of volatile memory references in case we don't
4848 have an instruction to move the whole thing. */
4849 && (! MEM_VOLATILE_P (op)
4850 || ! have_insn_for (SET, innermode))
4851 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4852 return adjust_address_nv (op, outermode, byte);
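/* For example, (subreg:QI (mem:SI addr) 3) becomes a narrower reference
to the adjusted address, roughly (mem:QI (plus addr (const_int 3)));
the exact address form is up to adjust_address_nv. */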
4854 /* Handle complex values represented as CONCAT
4855 of real and imaginary part. */
4856 if (GET_CODE (op) == CONCAT)
4858 unsigned int part_size, final_offset;
4859 rtx part, res;
4861 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4862 if (byte < part_size)
4864 part = XEXP (op, 0);
4865 final_offset = byte;
4867 else
4869 part = XEXP (op, 1);
4870 final_offset = byte - part_size;
4873 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4874 return NULL_RTX;
4876 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4877 if (res)
4878 return res;
4879 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4880 return gen_rtx_SUBREG (outermode, part, final_offset);
4881 return NULL_RTX;
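/* For a complex value such as (concat:SC re:SF im:SF) with 4-byte
SFmode, part_size is 4, so byte 0 selects the real part and byte 4
the imaginary part. */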
4884 /* Optimize SUBREG truncations of zero and sign extended values. */
4885 if ((GET_CODE (op) == ZERO_EXTEND
4886 || GET_CODE (op) == SIGN_EXTEND)
4887 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4889 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4891 /* If we're requesting the lowpart of a zero or sign extension,
4892 there are three possibilities. If the outermode is the same
4893 as the origmode, we can omit both the extension and the subreg.
4894 If the outermode is not larger than the origmode, we can apply
4895 the truncation without the extension. Finally, if the outermode
4896 is larger than the origmode, but both are integer modes, we
4897 can just extend to the appropriate mode. */
4898 if (bitpos == 0)
4900 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4901 if (outermode == origmode)
4902 return XEXP (op, 0);
4903 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4904 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4905 subreg_lowpart_offset (outermode,
4906 origmode));
4907 if (SCALAR_INT_MODE_P (outermode))
4908 return simplify_gen_unary (GET_CODE (op), outermode,
4909 XEXP (op, 0), origmode);
4912 /* A SUBREG resulting from a zero extension may fold to zero if
4913 it extracts higher bits than the ZERO_EXTEND's source bits. */
4914 if (GET_CODE (op) == ZERO_EXTEND
4915 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4916 return CONST0_RTX (outermode);
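/* Some instances of the cases above: (subreg:HI (zero_extend:SI
(reg:HI 60)) 0) returns (reg:HI 60) outright;
(subreg:HI (zero_extend:SI (reg:QI 61)) 0) re-extends as
(zero_extend:HI (reg:QI 61)); and on a little-endian target the high
part (subreg:HI (zero_extend:SI (reg:QI 61)) 2) folds to
(const_int 0). */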
4919 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4920 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4921 the outer subreg is effectively a truncation to the original mode. */
4922 if ((GET_CODE (op) == LSHIFTRT
4923 || GET_CODE (op) == ASHIFTRT)
4924 && SCALAR_INT_MODE_P (outermode)
4925 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4926 to avoid the possibility that an outer LSHIFTRT shifts by more
4927 than the sign extension's sign_bit_copies and introduces zeros
4928 into the high bits of the result. */
4929 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4930 && GET_CODE (XEXP (op, 1)) == CONST_INT
4931 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4932 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4933 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4934 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4935 return simplify_gen_binary (ASHIFTRT, outermode,
4936 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4938 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4939 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4940 the outer subreg is effectively a truncation to the original mode. */
4941 if ((GET_CODE (op) == LSHIFTRT
4942 || GET_CODE (op) == ASHIFTRT)
4943 && SCALAR_INT_MODE_P (outermode)
4944 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4945 && GET_CODE (XEXP (op, 1)) == CONST_INT
4946 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4947 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4948 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4949 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4950 return simplify_gen_binary (LSHIFTRT, outermode,
4951 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4953 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4954 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4955 the outer subreg is effectively a truncation to the original mode. */
4956 if (GET_CODE (op) == ASHIFT
4957 && SCALAR_INT_MODE_P (outermode)
4958 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4959 && GET_CODE (XEXP (op, 1)) == CONST_INT
4960 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4961 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4962 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4963 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4964 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4965 return simplify_gen_binary (ASHIFT, outermode,
4966 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
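/* A concrete instance of the three shift rules above:
(subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI 60)) (const_int 3)) 0)
becomes (ashiftrt:QI (reg:QI 60) (const_int 3)), since the subreg
merely truncates back to the mode the value was extended from. */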
4968 return NULL_RTX;
4971 /* Make a SUBREG operation or equivalent if it folds. */
4973 rtx
4974 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4975 enum machine_mode innermode, unsigned int byte)
4977 rtx newx;
4979 newx = simplify_subreg (outermode, op, innermode, byte);
4980 if (newx)
4981 return newx;
4983 if (GET_CODE (op) == SUBREG
4984 || GET_CODE (op) == CONCAT
4985 || GET_MODE (op) == VOIDmode)
4986 return NULL_RTX;
4988 if (validate_subreg (outermode, innermode, op, byte))
4989 return gen_rtx_SUBREG (outermode, op, byte);
4991 return NULL_RTX;
4994 /* Simplify X, an rtx expression.
4996 Return the simplified expression or NULL if no simplifications
4997 were possible.
4999 This is the preferred entry point into the simplification routines;
5000 however, we still allow passes to call the more specific routines.
5002 Right now GCC has three (yes, three) major bodies of RTL simplification
5003 code that need to be unified.
5005 1. fold_rtx in cse.c. This code uses various CSE specific
5006 information to aid in RTL simplification.
5008 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5009 it uses combine specific information to aid in RTL
5010 simplification.
5012 3. The routines in this file.
5015 Long term we want to only have one body of simplification code; to
5016 get to that state I recommend the following steps:
5018 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5019 which do not depend on pass-specific state into these routines.
5021 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5022 use this routine whenever possible.
5024 3. Allow for pass dependent state to be provided to these
5025 routines and add simplifications based on the pass dependent
5026 state. Remove code from cse.c & combine.c that becomes
5027 redundant/dead.
5029 It will take time, but ultimately the compiler will be easier to
5030 maintain and improve. It's totally silly that when we add a
5031 simplification it needs to be added to 4 places (3 for RTL
5032 simplification and 1 for tree simplification). */
5034 rtx
5035 simplify_rtx (rtx x)
5037 enum rtx_code code = GET_CODE (x);
5038 enum machine_mode mode = GET_MODE (x);
5040 switch (GET_RTX_CLASS (code))
5042 case RTX_UNARY:
5043 return simplify_unary_operation (code, mode,
5044 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5045 case RTX_COMM_ARITH:
5046 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5047 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5049 /* Fall through.... */
5051 case RTX_BIN_ARITH:
5052 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5054 case RTX_TERNARY:
5055 case RTX_BITFIELD_OPS:
5056 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5057 XEXP (x, 0), XEXP (x, 1),
5058 XEXP (x, 2));
5060 case RTX_COMPARE:
5061 case RTX_COMM_COMPARE:
5062 return simplify_relational_operation (code, mode,
5063 ((GET_MODE (XEXP (x, 0))
5064 != VOIDmode)
5065 ? GET_MODE (XEXP (x, 0))
5066 : GET_MODE (XEXP (x, 1))),
5067 XEXP (x, 0),
5068 XEXP (x, 1));
5070 case RTX_EXTRA:
5071 if (code == SUBREG)
5072 return simplify_subreg (mode, SUBREG_REG (x),
5073 GET_MODE (SUBREG_REG (x)),
5074 SUBREG_BYTE (x));
5075 break;
5077 case RTX_OBJ:
5078 if (code == LO_SUM)
5080 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5081 if (GET_CODE (XEXP (x, 0)) == HIGH
5082 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5083 return XEXP (x, 1);
5085 break;
5087 default:
5088 break;
5090 return NULL;