/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
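/* Illustrative example: a two-word value whose low word is -2 gets a
   high word of HWI_SIGN_EXTEND (-2) == -1 (all ones), while a low
   word of 2 gets a high word of 0.  */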
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
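/* Illustrative example: for SImode this accepts the CONST_INT whose
   low 32 bits are 0x80000000 (stored sign-extended in the host word),
   since after masking to the mode's width only the most significant
   bit remains set.  */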
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
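/* For instance (illustrative), simplify_gen_binary (PLUS, SImode, x,
   const0_rtx) returns X itself rather than building
   (plus X (const_int 0)), because the fold in
   simplify_binary_operation succeeds.  */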
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
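/* Illustrative example: a MEM whose address is a SYMBOL_REF into the
   constant pool, where the pool entry holds (const_int 42) in the
   MEM's own mode, is rewritten as (const_int 42) outright.  */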
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
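/* Illustrative example: substituting (const_int 6) for (reg R) in
   (plus:SI (reg R) (const_int 4)) rebuilds the PLUS through
   simplify_gen_binary and so folds all the way to (const_int 10).  */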
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
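      /* Illustrative example of the De Morgan rule above:
         (not (and X (not Y))) becomes (ior (not X) Y), with the single
         remaining NOT placed first to match the machine patterns.  */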
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
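      /* Worked example (illustrative) of the LT rule above: with
         STORE_FLAG_VALUE == 1, (neg:SI (lt:SI X (const_int 0)))
         becomes (ashiftrt:SI X (const_int 31)), which is -1 when X
         is negative and 0 otherwise.  */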
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
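      /* Illustrative example of the NEG/ABS rule above:
         (truncate:SI (neg:DI (sign_extend:DI foo:SI))) simplifies to
         (neg:SI foo:SI), since the extension bits cannot affect the
         truncated result.  */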
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
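  /* Illustrative example of the folding above: for SImode,
     (not (const_int 5)) folds to (const_int -6), and
     (popcount (const_int 7)) folds to (const_int 3).  */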
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
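/* Illustrative example: for AND,
   (and (and X (const_int 3)) (const_int 6)) reassociates so the two
   constants fold, yielding (and X (const_int 2)).  */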
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
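      /* Illustrative example of the X * C + X distribution above:
         (plus (mult X (const_int 3)) X) becomes (mult X (const_int 4)),
         provided rtx_cost does not consider the multiply more expensive
         than the original sum.  */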
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
1961 case MULT:
1962 if (trueop1 == constm1_rtx)
1963 return simplify_gen_unary (NEG, mode, op0, mode);
1965 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1966 x is NaN, since x * 0 is then also NaN. Nor is it valid
1967 when the mode has signed zeros, since multiplying a negative
1968 number by 0 will give -0, not 0. */
1969 if (!HONOR_NANS (mode)
1970 && !HONOR_SIGNED_ZEROS (mode)
1971 && trueop1 == CONST0_RTX (mode)
1972 && ! side_effects_p (op0))
1973 return op1;
1975 /* In IEEE floating point, x*1 is not equivalent to x for
1976 signalling NaNs. */
1977 if (!HONOR_SNANS (mode)
1978 && trueop1 == CONST1_RTX (mode))
1979 return op0;
1981 /* Convert multiply by constant power of two into shift unless
1982 we are still generating RTL. This test is a kludge. */
1983 if (GET_CODE (trueop1) == CONST_INT
1984 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1985 /* If the mode is larger than the host word size, and the
1986 uppermost bit is set, then this isn't a power of two due
1987 to implicit sign extension. */
1988 && (width <= HOST_BITS_PER_WIDE_INT
1989 || val != HOST_BITS_PER_WIDE_INT - 1))
1990 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
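/* For example, (mult x (const_int 8)) becomes
   (ashift x (const_int 3)), since exact_log2 (8) == 3.  */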
1992 /* Likewise for multipliers wider than a word. */
1993 if (GET_CODE (trueop1) == CONST_DOUBLE
1994 && (GET_MODE (trueop1) == VOIDmode
1995 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1996 && GET_MODE (op0) == mode
1997 && CONST_DOUBLE_LOW (trueop1) == 0
1998 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1999 return simplify_gen_binary (ASHIFT, mode, op0,
2000 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2002 /* x*2 is x+x and x*(-1) is -x */
2003 if (GET_CODE (trueop1) == CONST_DOUBLE
2004 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2005 && GET_MODE (op0) == mode)
2007 REAL_VALUE_TYPE d;
2008 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2010 if (REAL_VALUES_EQUAL (d, dconst2))
2011 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2013 if (!HONOR_SNANS (mode)
2014 && REAL_VALUES_EQUAL (d, dconstm1))
2015 return simplify_gen_unary (NEG, mode, op0, mode);
2018 /* Optimize -x * -x as x * x. */
2019 if (FLOAT_MODE_P (mode)
2020 && GET_CODE (op0) == NEG
2021 && GET_CODE (op1) == NEG
2022 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2023 && !side_effects_p (XEXP (op0, 0)))
2024 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2026 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2027 if (SCALAR_FLOAT_MODE_P (mode)
2028 && GET_CODE (op0) == ABS
2029 && GET_CODE (op1) == ABS
2030 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2031 && !side_effects_p (XEXP (op0, 0)))
2032 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 /* Reassociate multiplication, but for floating point MULTs
2035 only when the user specifies unsafe math optimizations. */
2036 if (! FLOAT_MODE_P (mode)
2037 || flag_unsafe_math_optimizations)
2039 tem = simplify_associative_operation (code, mode, op0, op1);
2040 if (tem)
2041 return tem;
2043 break;
2045 case IOR:
2046 if (trueop1 == const0_rtx)
2047 return op0;
2048 if (GET_CODE (trueop1) == CONST_INT
2049 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2050 == GET_MODE_MASK (mode)))
2051 return op1;
2052 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2053 return op0;
2054 /* A | (~A) -> -1 */
2055 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2056 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2057 && ! side_effects_p (op0)
2058 && SCALAR_INT_MODE_P (mode))
2059 return constm1_rtx;
2061 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2062 if (GET_CODE (op1) == CONST_INT
2063 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2064 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2065 return op1;
2067 /* Canonicalize (X & C1) | C2. */
2068 if (GET_CODE (op0) == AND
2069 && GET_CODE (trueop1) == CONST_INT
2070 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2072 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2073 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2074 HOST_WIDE_INT c2 = INTVAL (trueop1);
2076 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2077 if ((c1 & c2) == c1
2078 && !side_effects_p (XEXP (op0, 0)))
2079 return trueop1;
2081 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2082 if (((c1|c2) & mask) == mask)
2083 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2085 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2086 if (((c1 & ~c2) & mask) != (c1 & mask))
2088 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2089 gen_int_mode (c1 & ~c2, mode));
2090 return simplify_gen_binary (IOR, mode, tem, op1);
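/* Worked example of the last rule: with C1 == 0x0ff0 and
   C2 == 0x00ff, C1 & ~C2 == 0x0f00, so (X & 0x0ff0) | 0x00ff is
   rewritten as (X & 0x0f00) | 0x00ff; the bits of C1 that C2
   already supplies are dropped from the AND mask.  */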
2094 /* Convert (A & B) | A to A. */
2095 if (GET_CODE (op0) == AND
2096 && (rtx_equal_p (XEXP (op0, 0), op1)
2097 || rtx_equal_p (XEXP (op0, 1), op1))
2098 && ! side_effects_p (XEXP (op0, 0))
2099 && ! side_effects_p (XEXP (op0, 1)))
2100 return op1;
2102 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2103 mode size to (rotate A CX). */
2105 if (GET_CODE (op1) == ASHIFT
2106 || GET_CODE (op1) == SUBREG)
2108 opleft = op1;
2109 opright = op0;
2111 else
2113 opright = op1;
2114 opleft = op0;
2117 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2118 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2119 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2120 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2121 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2122 == GET_MODE_BITSIZE (mode)))
2123 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
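/* For instance, in SImode (ior (ashift x (const_int 3))
   (lshiftrt x (const_int 29))) matches because 3 + 29 == 32 and
   becomes (rotate x (const_int 3)).  */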
2125 /* Same, but for ashift that has been "simplified" to a wider mode
2126 by simplify_shift_const. */
2128 if (GET_CODE (opleft) == SUBREG
2129 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2130 && GET_CODE (opright) == LSHIFTRT
2131 && GET_CODE (XEXP (opright, 0)) == SUBREG
2132 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2133 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2134 && (GET_MODE_SIZE (GET_MODE (opleft))
2135 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2136 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2137 SUBREG_REG (XEXP (opright, 0)))
2138 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2139 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2140 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2141 == GET_MODE_BITSIZE (mode)))
2142 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2143 XEXP (SUBREG_REG (opleft), 1));
2145 /* If we have (ior (and X C1) C2), simplify this by making
2146 C1 as small as possible if C1 actually changes. */
2147 if (GET_CODE (op1) == CONST_INT
2148 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2149 || INTVAL (op1) > 0)
2150 && GET_CODE (op0) == AND
2151 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2152 && GET_CODE (op1) == CONST_INT
2153 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2154 return simplify_gen_binary (IOR, mode,
2155 simplify_gen_binary
2156 (AND, mode, XEXP (op0, 0),
2157 GEN_INT (INTVAL (XEXP (op0, 1))
2158 & ~INTVAL (op1))),
2159 op1);
2161 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2162 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2163 the PLUS does not affect any of the bits in OP1: if so, we can do
2164 the IOR as a PLUS and we can associate. This is valid if OP1
2165 can be safely shifted left C bits. */
2166 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2167 && GET_CODE (XEXP (op0, 0)) == PLUS
2168 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2169 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2170 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2172 int count = INTVAL (XEXP (op0, 1));
2173 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2175 if (mask >> count == INTVAL (trueop1)
2176 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2177 return simplify_gen_binary (ASHIFTRT, mode,
2178 plus_constant (XEXP (op0, 0), mask),
2179 XEXP (op0, 1));
2182 tem = simplify_associative_operation (code, mode, op0, op1);
2183 if (tem)
2184 return tem;
2185 break;
2187 case XOR:
2188 if (trueop1 == const0_rtx)
2189 return op0;
2190 if (GET_CODE (trueop1) == CONST_INT
2191 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2192 == GET_MODE_MASK (mode)))
2193 return simplify_gen_unary (NOT, mode, op0, mode);
2194 if (rtx_equal_p (trueop0, trueop1)
2195 && ! side_effects_p (op0)
2196 && GET_MODE_CLASS (mode) != MODE_CC)
2197 return CONST0_RTX (mode);
2199 /* Canonicalize XOR of the most significant bit to PLUS. */
2200 if ((GET_CODE (op1) == CONST_INT
2201 || GET_CODE (op1) == CONST_DOUBLE)
2202 && mode_signbit_p (mode, op1))
2203 return simplify_gen_binary (PLUS, mode, op0, op1);
2204 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2205 if ((GET_CODE (op1) == CONST_INT
2206 || GET_CODE (op1) == CONST_DOUBLE)
2207 && GET_CODE (op0) == PLUS
2208 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2209 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2210 && mode_signbit_p (mode, XEXP (op0, 1)))
2211 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2212 simplify_gen_binary (XOR, mode, op1,
2213 XEXP (op0, 1)));
2215 /* If we are XORing two things that have no bits in common,
2216 convert them into an IOR. This helps to detect rotation encoded
2217 using those methods and possibly other simplifications. */
2219 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2220 && (nonzero_bits (op0, mode)
2221 & nonzero_bits (op1, mode)) == 0)
2222 return (simplify_gen_binary (IOR, mode, op0, op1));
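/* XOR and IOR agree whenever no bit position is set in both
   operands, since each result bit then comes from at most one
   input: e.g. 0b0101 ^ 0b1010 == 0b0101 | 0b1010 == 0b1111.  */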
2224 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2225 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2226 (NOT y). */
2228 int num_negated = 0;
2230 if (GET_CODE (op0) == NOT)
2231 num_negated++, op0 = XEXP (op0, 0);
2232 if (GET_CODE (op1) == NOT)
2233 num_negated++, op1 = XEXP (op1, 0);
2235 if (num_negated == 2)
2236 return simplify_gen_binary (XOR, mode, op0, op1);
2237 else if (num_negated == 1)
2238 return simplify_gen_unary (NOT, mode,
2239 simplify_gen_binary (XOR, mode, op0, op1),
2240 mode);
2243 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2244 correspond to a machine insn or result in further simplifications
2245 if B is a constant. */
2247 if (GET_CODE (op0) == AND
2248 && rtx_equal_p (XEXP (op0, 1), op1)
2249 && ! side_effects_p (op1))
2250 return simplify_gen_binary (AND, mode,
2251 simplify_gen_unary (NOT, mode,
2252 XEXP (op0, 0), mode),
2253 op1);
2255 else if (GET_CODE (op0) == AND
2256 && rtx_equal_p (XEXP (op0, 0), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode,
2259 simplify_gen_unary (NOT, mode,
2260 XEXP (op0, 1), mode),
2261 op1);
2263 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2264 comparison if STORE_FLAG_VALUE is 1. */
2265 if (STORE_FLAG_VALUE == 1
2266 && trueop1 == const1_rtx
2267 && COMPARISON_P (op0)
2268 && (reversed = reversed_comparison (op0, mode)))
2269 return reversed;
2271 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2272 is (lt foo (const_int 0)), so we can perform the above
2273 simplification if STORE_FLAG_VALUE is 1. */
2275 if (STORE_FLAG_VALUE == 1
2276 && trueop1 == const1_rtx
2277 && GET_CODE (op0) == LSHIFTRT
2278 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2279 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2280 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2282 /* (xor (comparison foo bar) (const_int sign-bit))
2283 when STORE_FLAG_VALUE is the sign bit. */
2284 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2285 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2286 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2287 && trueop1 == const_true_rtx
2288 && COMPARISON_P (op0)
2289 && (reversed = reversed_comparison (op0, mode)))
2290 return reversed;
2292 break;
2294 tem = simplify_associative_operation (code, mode, op0, op1);
2295 if (tem)
2296 return tem;
2297 break;
2299 case AND:
2300 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2301 return trueop1;
2302 /* If we are turning off bits already known off in OP0, we need
2303 not do an AND. */
2304 if (GET_CODE (trueop1) == CONST_INT
2305 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2307 return op0;
2308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2309 && GET_MODE_CLASS (mode) != MODE_CC)
2310 return op0;
2311 /* A & (~A) -> 0 */
2312 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2313 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2314 && ! side_effects_p (op0)
2315 && GET_MODE_CLASS (mode) != MODE_CC)
2316 return CONST0_RTX (mode);
2318 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2319 there are no nonzero bits of C outside of X's mode. */
2320 if ((GET_CODE (op0) == SIGN_EXTEND
2321 || GET_CODE (op0) == ZERO_EXTEND)
2322 && GET_CODE (trueop1) == CONST_INT
2323 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2324 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2325 & INTVAL (trueop1)) == 0)
2327 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2328 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2329 gen_int_mode (INTVAL (trueop1),
2330 imode));
2331 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2334 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2335 if (GET_CODE (op0) == IOR
2336 && GET_CODE (trueop1) == CONST_INT
2337 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2339 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2340 return simplify_gen_binary (IOR, mode,
2341 simplify_gen_binary (AND, mode,
2342 XEXP (op0, 0), op1),
2343 gen_int_mode (tmp, mode));
2346 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2347 insn (and may simplify more). */
2348 if (GET_CODE (op0) == XOR
2349 && rtx_equal_p (XEXP (op0, 0), op1)
2350 && ! side_effects_p (op1))
2351 return simplify_gen_binary (AND, mode,
2352 simplify_gen_unary (NOT, mode,
2353 XEXP (op0, 1), mode),
2354 op1);
2356 if (GET_CODE (op0) == XOR
2357 && rtx_equal_p (XEXP (op0, 1), op1)
2358 && ! side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode,
2360 simplify_gen_unary (NOT, mode,
2361 XEXP (op0, 0), mode),
2362 op1);
2364 /* Similarly for (~(A ^ B)) & A. */
2365 if (GET_CODE (op0) == NOT
2366 && GET_CODE (XEXP (op0, 0)) == XOR
2367 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2368 && ! side_effects_p (op1))
2369 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2371 if (GET_CODE (op0) == NOT
2372 && GET_CODE (XEXP (op0, 0)) == XOR
2373 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2374 && ! side_effects_p (op1))
2375 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2377 /* Convert (A | B) & A to A. */
2378 if (GET_CODE (op0) == IOR
2379 && (rtx_equal_p (XEXP (op0, 0), op1)
2380 || rtx_equal_p (XEXP (op0, 1), op1))
2381 && ! side_effects_p (XEXP (op0, 0))
2382 && ! side_effects_p (XEXP (op0, 1)))
2383 return op1;
2385 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2386 ((A & N) + B) & M -> (A + B) & M
2387 Similarly if (N & M) == 0,
2388 ((A | N) + B) & M -> (A + B) & M
2389 and for - instead of + and/or ^ instead of |. */
2390 if (GET_CODE (trueop1) == CONST_INT
2391 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2392 && ~INTVAL (trueop1)
2393 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2394 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2396 rtx pmop[2];
2397 int which;
2399 pmop[0] = XEXP (op0, 0);
2400 pmop[1] = XEXP (op0, 1);
2402 for (which = 0; which < 2; which++)
2404 tem = pmop[which];
2405 switch (GET_CODE (tem))
2407 case AND:
2408 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2409 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2410 == INTVAL (trueop1))
2411 pmop[which] = XEXP (tem, 0);
2412 break;
2413 case IOR:
2414 case XOR:
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2417 pmop[which] = XEXP (tem, 0);
2418 break;
2419 default:
2420 break;
2424 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2426 tem = simplify_gen_binary (GET_CODE (op0), mode,
2427 pmop[0], pmop[1]);
2428 return simplify_gen_binary (code, mode, tem, op1);
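/* Example of the first rule: with M == 7 (0b111) and N == 0xff,
   (N & M) == M, so ((A & 0xff) + B) & 7 folds to (A + B) & 7.
   Masking A with N leaves every bit selected by M intact, and
   since carries only propagate upward, the low bits of the sum
   that M keeps cannot change.  */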
2431 tem = simplify_associative_operation (code, mode, op0, op1);
2432 if (tem)
2433 return tem;
2434 break;
2436 case UDIV:
2437 /* 0/x is 0 (or x&0 if x has side-effects). */
2438 if (trueop0 == CONST0_RTX (mode))
2440 if (side_effects_p (op1))
2441 return simplify_gen_binary (AND, mode, op1, trueop0);
2442 return trueop0;
2444 /* x/1 is x. */
2445 if (trueop1 == CONST1_RTX (mode))
2446 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2447 /* Convert divide by power of two into shift. */
2448 if (GET_CODE (trueop1) == CONST_INT
2449 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2450 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2451 break;
2453 case DIV:
2454 /* Handle floating point and integers separately. */
2455 if (SCALAR_FLOAT_MODE_P (mode))
2457 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2458 safe for modes with NaNs, since 0.0 / 0.0 will then be
2459 NaN rather than 0.0. Nor is it safe for modes with signed
2460 zeros, since dividing 0 by a negative number gives -0.0. */
2461 if (trueop0 == CONST0_RTX (mode)
2462 && !HONOR_NANS (mode)
2463 && !HONOR_SIGNED_ZEROS (mode)
2464 && ! side_effects_p (op1))
2465 return op0;
2466 /* x/1.0 is x. */
2467 if (trueop1 == CONST1_RTX (mode)
2468 && !HONOR_SNANS (mode))
2469 return op0;
2471 if (GET_CODE (trueop1) == CONST_DOUBLE
2472 && trueop1 != CONST0_RTX (mode))
2474 REAL_VALUE_TYPE d;
2475 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2477 /* x/-1.0 is -x. */
2478 if (REAL_VALUES_EQUAL (d, dconstm1)
2479 && !HONOR_SNANS (mode))
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2482 /* Change FP division by a constant into multiplication.
2483 Only do this with -funsafe-math-optimizations. */
2484 if (flag_unsafe_math_optimizations
2485 && !REAL_VALUES_EQUAL (d, dconst0))
2487 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2488 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2489 return simplify_gen_binary (MULT, mode, op0, tem);
2493 else
2495 /* 0/x is 0 (or x&0 if x has side-effects). */
2496 if (trueop0 == CONST0_RTX (mode))
2498 if (side_effects_p (op1))
2499 return simplify_gen_binary (AND, mode, op1, trueop0);
2500 return trueop0;
2502 /* x/1 is x. */
2503 if (trueop1 == CONST1_RTX (mode))
2504 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2505 /* x/-1 is -x. */
2506 if (trueop1 == constm1_rtx)
2508 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2509 return simplify_gen_unary (NEG, mode, x, mode);
2512 break;
2514 case UMOD:
2515 /* 0%x is 0 (or x&0 if x has side-effects). */
2516 if (trueop0 == CONST0_RTX (mode))
2518 if (side_effects_p (op1))
2519 return simplify_gen_binary (AND, mode, op1, trueop0);
2520 return trueop0;
2522 /* x%1 is 0 (or x&0 if x has side-effects). */
2523 if (trueop1 == CONST1_RTX (mode))
2525 if (side_effects_p (op0))
2526 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2527 return CONST0_RTX (mode);
2529 /* Implement modulus by power of two as AND. */
2530 if (GET_CODE (trueop1) == CONST_INT
2531 && exact_log2 (INTVAL (trueop1)) > 0)
2532 return simplify_gen_binary (AND, mode, op0,
2533 GEN_INT (INTVAL (op1) - 1));
2534 break;
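/* The same identity in plain C (an illustrative sketch, not part
   of this file): for a power of two P and unsigned x,
   x % P == x & (P - 1), because P - 1 has ones exactly in the bit
   positions that survive reduction modulo P.  For instance:

       unsigned umod8 (unsigned x) { return x & (8 - 1); }

   computes x % 8.  */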
2536 case MOD:
2537 /* 0%x is 0 (or x&0 if x has side-effects). */
2538 if (trueop0 == CONST0_RTX (mode))
2540 if (side_effects_p (op1))
2541 return simplify_gen_binary (AND, mode, op1, trueop0);
2542 return trueop0;
2544 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2545 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2547 if (side_effects_p (op0))
2548 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2549 return CONST0_RTX (mode);
2551 break;
2553 case ROTATERT:
2554 case ROTATE:
2555 case ASHIFTRT:
2556 if (trueop1 == CONST0_RTX (mode))
2557 return op0;
2558 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2559 return op0;
2560 /* Rotating ~0 always results in ~0. */
2561 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2562 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2563 && ! side_effects_p (op1))
2564 return op0;
2565 break;
2567 case ASHIFT:
2568 case SS_ASHIFT:
2569 if (trueop1 == CONST0_RTX (mode))
2570 return op0;
2571 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2572 return op0;
2573 break;
2575 case LSHIFTRT:
2576 if (trueop1 == CONST0_RTX (mode))
2577 return op0;
2578 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2579 return op0;
2580 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2581 if (GET_CODE (op0) == CLZ
2582 && GET_CODE (trueop1) == CONST_INT
2583 && STORE_FLAG_VALUE == 1
2584 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2586 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2587 unsigned HOST_WIDE_INT zero_val = 0;
2589 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2590 && zero_val == GET_MODE_BITSIZE (imode)
2591 && INTVAL (trueop1) == exact_log2 (zero_val))
2592 return simplify_gen_relational (EQ, mode, imode,
2593 XEXP (op0, 0), const0_rtx);
2595 break;
2597 case SMIN:
2598 if (width <= HOST_BITS_PER_WIDE_INT
2599 && GET_CODE (trueop1) == CONST_INT
2600 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2601 && ! side_effects_p (op0))
2602 return op1;
2603 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2604 return op0;
2605 tem = simplify_associative_operation (code, mode, op0, op1);
2606 if (tem)
2607 return tem;
2608 break;
2610 case SMAX:
2611 if (width <= HOST_BITS_PER_WIDE_INT
2612 && GET_CODE (trueop1) == CONST_INT
2613 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2614 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2615 && ! side_effects_p (op0))
2616 return op1;
2617 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2618 return op0;
2619 tem = simplify_associative_operation (code, mode, op0, op1);
2620 if (tem)
2621 return tem;
2622 break;
2624 case UMIN:
2625 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2626 return op1;
2627 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2628 return op0;
2629 tem = simplify_associative_operation (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2632 break;
2634 case UMAX:
2635 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2636 return op1;
2637 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2638 return op0;
2639 tem = simplify_associative_operation (code, mode, op0, op1);
2640 if (tem)
2641 return tem;
2642 break;
2644 case SS_PLUS:
2645 case US_PLUS:
2646 case SS_MINUS:
2647 case US_MINUS:
2648 /* ??? There are simplifications that can be done. */
2649 return 0;
2651 case VEC_SELECT:
2652 if (!VECTOR_MODE_P (mode))
2654 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2655 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2656 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2657 gcc_assert (XVECLEN (trueop1, 0) == 1);
2658 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2660 if (GET_CODE (trueop0) == CONST_VECTOR)
2661 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2662 (trueop1, 0, 0)));
2664 else
2666 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2667 gcc_assert (GET_MODE_INNER (mode)
2668 == GET_MODE_INNER (GET_MODE (trueop0)));
2669 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2671 if (GET_CODE (trueop0) == CONST_VECTOR)
2673 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2674 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2675 rtvec v = rtvec_alloc (n_elts);
2676 unsigned int i;
2678 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2679 for (i = 0; i < n_elts; i++)
2681 rtx x = XVECEXP (trueop1, 0, i);
2683 gcc_assert (GET_CODE (x) == CONST_INT);
2684 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2685 INTVAL (x));
2688 return gen_rtx_CONST_VECTOR (mode, v);
2692 if (XVECLEN (trueop1, 0) == 1
2693 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2694 && GET_CODE (trueop0) == VEC_CONCAT)
2696 rtx vec = trueop0;
2697 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2699 /* Try to find the element in the VEC_CONCAT. */
2700 while (GET_MODE (vec) != mode
2701 && GET_CODE (vec) == VEC_CONCAT)
2703 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2704 if (offset < vec_size)
2705 vec = XEXP (vec, 0);
2706 else
2708 offset -= vec_size;
2709 vec = XEXP (vec, 1);
2711 vec = avoid_constant_pool_reference (vec);
2714 if (GET_MODE (vec) == mode)
2715 return vec;
2718 return 0;
2719 case VEC_CONCAT:
2721 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2722 ? GET_MODE (trueop0)
2723 : GET_MODE_INNER (mode));
2724 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2725 ? GET_MODE (trueop1)
2726 : GET_MODE_INNER (mode));
2728 gcc_assert (VECTOR_MODE_P (mode));
2729 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2730 == GET_MODE_SIZE (mode));
2732 if (VECTOR_MODE_P (op0_mode))
2733 gcc_assert (GET_MODE_INNER (mode)
2734 == GET_MODE_INNER (op0_mode));
2735 else
2736 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2738 if (VECTOR_MODE_P (op1_mode))
2739 gcc_assert (GET_MODE_INNER (mode)
2740 == GET_MODE_INNER (op1_mode));
2741 else
2742 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2744 if ((GET_CODE (trueop0) == CONST_VECTOR
2745 || GET_CODE (trueop0) == CONST_INT
2746 || GET_CODE (trueop0) == CONST_DOUBLE)
2747 && (GET_CODE (trueop1) == CONST_VECTOR
2748 || GET_CODE (trueop1) == CONST_INT
2749 || GET_CODE (trueop1) == CONST_DOUBLE))
2751 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2752 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2753 rtvec v = rtvec_alloc (n_elts);
2754 unsigned int i;
2755 unsigned in_n_elts = 1;
2757 if (VECTOR_MODE_P (op0_mode))
2758 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2759 for (i = 0; i < n_elts; i++)
2761 if (i < in_n_elts)
2763 if (!VECTOR_MODE_P (op0_mode))
2764 RTVEC_ELT (v, i) = trueop0;
2765 else
2766 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2768 else
2770 if (!VECTOR_MODE_P (op1_mode))
2771 RTVEC_ELT (v, i) = trueop1;
2772 else
2773 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2774 i - in_n_elts);
2778 return gen_rtx_CONST_VECTOR (mode, v);
2781 return 0;
2783 default:
2784 gcc_unreachable ();
2787 return 0;
2790 rtx
2791 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2792 rtx op0, rtx op1)
2794 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2795 HOST_WIDE_INT val;
2796 unsigned int width = GET_MODE_BITSIZE (mode);
2798 if (VECTOR_MODE_P (mode)
2799 && code != VEC_CONCAT
2800 && GET_CODE (op0) == CONST_VECTOR
2801 && GET_CODE (op1) == CONST_VECTOR)
2803 unsigned n_elts = GET_MODE_NUNITS (mode);
2804 enum machine_mode op0mode = GET_MODE (op0);
2805 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2806 enum machine_mode op1mode = GET_MODE (op1);
2807 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2808 rtvec v = rtvec_alloc (n_elts);
2809 unsigned int i;
2811 gcc_assert (op0_n_elts == n_elts);
2812 gcc_assert (op1_n_elts == n_elts);
2813 for (i = 0; i < n_elts; i++)
2815 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2816 CONST_VECTOR_ELT (op0, i),
2817 CONST_VECTOR_ELT (op1, i));
2818 if (!x)
2819 return 0;
2820 RTVEC_ELT (v, i) = x;
2823 return gen_rtx_CONST_VECTOR (mode, v);
2826 if (VECTOR_MODE_P (mode)
2827 && code == VEC_CONCAT
2828 && CONSTANT_P (op0) && CONSTANT_P (op1))
2830 unsigned n_elts = GET_MODE_NUNITS (mode);
2831 rtvec v = rtvec_alloc (n_elts);
2833 gcc_assert (n_elts >= 2);
2834 if (n_elts == 2)
2836 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2837 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2839 RTVEC_ELT (v, 0) = op0;
2840 RTVEC_ELT (v, 1) = op1;
2842 else
2844 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2845 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2846 unsigned i;
2848 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2849 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2850 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2852 for (i = 0; i < op0_n_elts; ++i)
2853 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2854 for (i = 0; i < op1_n_elts; ++i)
2855 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2858 return gen_rtx_CONST_VECTOR (mode, v);
2861 if (SCALAR_FLOAT_MODE_P (mode)
2862 && GET_CODE (op0) == CONST_DOUBLE
2863 && GET_CODE (op1) == CONST_DOUBLE
2864 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2866 if (code == AND
2867 || code == IOR
2868 || code == XOR)
2870 long tmp0[4];
2871 long tmp1[4];
2872 REAL_VALUE_TYPE r;
2873 int i;
2875 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2876 GET_MODE (op0));
2877 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2878 GET_MODE (op1));
2879 for (i = 0; i < 4; i++)
2881 switch (code)
2883 case AND:
2884 tmp0[i] &= tmp1[i];
2885 break;
2886 case IOR:
2887 tmp0[i] |= tmp1[i];
2888 break;
2889 case XOR:
2890 tmp0[i] ^= tmp1[i];
2891 break;
2892 default:
2893 gcc_unreachable ();
2896 real_from_target (&r, tmp0, mode);
2897 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2899 else
2901 REAL_VALUE_TYPE f0, f1, value, result;
2902 bool inexact;
2904 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2905 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2906 real_convert (&f0, mode, &f0);
2907 real_convert (&f1, mode, &f1);
2909 if (HONOR_SNANS (mode)
2910 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2911 return 0;
2913 if (code == DIV
2914 && REAL_VALUES_EQUAL (f1, dconst0)
2915 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2916 return 0;
2918 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2919 && flag_trapping_math
2920 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2922 int s0 = REAL_VALUE_NEGATIVE (f0);
2923 int s1 = REAL_VALUE_NEGATIVE (f1);
2925 switch (code)
2927 case PLUS:
2928 /* Inf + -Inf = NaN plus exception. */
2929 if (s0 != s1)
2930 return 0;
2931 break;
2932 case MINUS:
2933 /* Inf - Inf = NaN plus exception. */
2934 if (s0 == s1)
2935 return 0;
2936 break;
2937 case DIV:
2938 /* Inf / Inf = NaN plus exception. */
2939 return 0;
2940 default:
2941 break;
2945 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2946 && flag_trapping_math
2947 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2948 || (REAL_VALUE_ISINF (f1)
2949 && REAL_VALUES_EQUAL (f0, dconst0))))
2950 /* Inf * 0 = NaN plus exception. */
2951 return 0;
2953 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2954 &f0, &f1);
2955 real_convert (&result, mode, &value);
2957 /* Don't constant fold this floating point operation if
2958 the result has overflowed and flag_trapping_math is set. */
2960 if (flag_trapping_math
2961 && MODE_HAS_INFINITIES (mode)
2962 && REAL_VALUE_ISINF (result)
2963 && !REAL_VALUE_ISINF (f0)
2964 && !REAL_VALUE_ISINF (f1))
2965 /* Overflow plus exception. */
2966 return 0;
2968 /* Don't constant fold this floating point operation if the
2969 result may depend upon the run-time rounding mode and
2970 flag_rounding_math is set, or if GCC's software emulation
2971 is unable to accurately represent the result. */
2973 if ((flag_rounding_math
2974 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2975 && !flag_unsafe_math_optimizations))
2976 && (inexact || !real_identical (&result, &value)))
2977 return NULL_RTX;
2979 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2983 /* We can fold some multi-word operations. */
2984 if (GET_MODE_CLASS (mode) == MODE_INT
2985 && width == HOST_BITS_PER_WIDE_INT * 2
2986 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2987 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2989 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2990 HOST_WIDE_INT h1, h2, hv, ht;
2992 if (GET_CODE (op0) == CONST_DOUBLE)
2993 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2994 else
2995 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2997 if (GET_CODE (op1) == CONST_DOUBLE)
2998 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2999 else
3000 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3002 switch (code)
3004 case MINUS:
3005 /* A - B == A + (-B). */
3006 neg_double (l2, h2, &lv, &hv);
3007 l2 = lv, h2 = hv;
3009 /* Fall through. */
3011 case PLUS:
3012 add_double (l1, h1, l2, h2, &lv, &hv);
3013 break;
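/* Two-word example with 32-bit HOST_WIDE_INT: subtracting 1 from
   2**32 represents the operands as (l1, h1) == (0, 1) and
   (l2, h2) == (1, 0); neg_double turns the subtrahend into
   (0xffffffff, -1), and add_double then produces (0xffffffff, 0),
   i.e. 2**32 - 1.  */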
3015 case MULT:
3016 mul_double (l1, h1, l2, h2, &lv, &hv);
3017 break;
3019 case DIV:
3020 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3021 &lv, &hv, &lt, &ht))
3022 return 0;
3023 break;
3025 case MOD:
3026 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3027 &lt, &ht, &lv, &hv))
3028 return 0;
3029 break;
3031 case UDIV:
3032 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3033 &lv, &hv, &lt, &ht))
3034 return 0;
3035 break;
3037 case UMOD:
3038 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3039 &lt, &ht, &lv, &hv))
3040 return 0;
3041 break;
3043 case AND:
3044 lv = l1 & l2, hv = h1 & h2;
3045 break;
3047 case IOR:
3048 lv = l1 | l2, hv = h1 | h2;
3049 break;
3051 case XOR:
3052 lv = l1 ^ l2, hv = h1 ^ h2;
3053 break;
3055 case SMIN:
3056 if (h1 < h2
3057 || (h1 == h2
3058 && ((unsigned HOST_WIDE_INT) l1
3059 < (unsigned HOST_WIDE_INT) l2)))
3060 lv = l1, hv = h1;
3061 else
3062 lv = l2, hv = h2;
3063 break;
3065 case SMAX:
3066 if (h1 > h2
3067 || (h1 == h2
3068 && ((unsigned HOST_WIDE_INT) l1
3069 > (unsigned HOST_WIDE_INT) l2)))
3070 lv = l1, hv = h1;
3071 else
3072 lv = l2, hv = h2;
3073 break;
3075 case UMIN:
3076 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3077 || (h1 == h2
3078 && ((unsigned HOST_WIDE_INT) l1
3079 < (unsigned HOST_WIDE_INT) l2)))
3080 lv = l1, hv = h1;
3081 else
3082 lv = l2, hv = h2;
3083 break;
3085 case UMAX:
3086 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3087 || (h1 == h2
3088 && ((unsigned HOST_WIDE_INT) l1
3089 > (unsigned HOST_WIDE_INT) l2)))
3090 lv = l1, hv = h1;
3091 else
3092 lv = l2, hv = h2;
3093 break;
3095 case LSHIFTRT: case ASHIFTRT:
3096 case ASHIFT:
3097 case ROTATE: case ROTATERT:
3098 if (SHIFT_COUNT_TRUNCATED)
3099 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3101 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3102 return 0;
3104 if (code == LSHIFTRT || code == ASHIFTRT)
3105 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3106 code == ASHIFTRT);
3107 else if (code == ASHIFT)
3108 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3109 else if (code == ROTATE)
3110 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3111 else /* code == ROTATERT */
3112 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3113 break;
3115 default:
3116 return 0;
3119 return immed_double_const (lv, hv, mode);
3122 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3123 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3125 /* Get the integer argument values in two forms:
3126 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3128 arg0 = INTVAL (op0);
3129 arg1 = INTVAL (op1);
3131 if (width < HOST_BITS_PER_WIDE_INT)
3133 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3134 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3136 arg0s = arg0;
3137 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3138 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3140 arg1s = arg1;
3141 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3142 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3144 else
3146 arg0s = arg0;
3147 arg1s = arg1;
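/* Illustration of the extension step above: for width == 8 and an
   input byte 0x80, the zero-extended value arg0 is 128, while the
   sign-extended value arg0s is -128, because bit 7 (the sign bit
   of the 8-bit value) is set and all higher bits of the
   HOST_WIDE_INT are filled with copies of it.  */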
3150 /* Compute the value of the arithmetic. */
3152 switch (code)
3154 case PLUS:
3155 val = arg0s + arg1s;
3156 break;
3158 case MINUS:
3159 val = arg0s - arg1s;
3160 break;
3162 case MULT:
3163 val = arg0s * arg1s;
3164 break;
3166 case DIV:
3167 if (arg1s == 0
3168 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3169 && arg1s == -1))
3170 return 0;
3171 val = arg0s / arg1s;
3172 break;
3174 case MOD:
3175 if (arg1s == 0
3176 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3177 && arg1s == -1))
3178 return 0;
3179 val = arg0s % arg1s;
3180 break;
3182 case UDIV:
3183 if (arg1 == 0
3184 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3185 && arg1s == -1))
3186 return 0;
3187 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3188 break;
3190 case UMOD:
3191 if (arg1 == 0
3192 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3193 && arg1s == -1))
3194 return 0;
3195 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3196 break;
3198 case AND:
3199 val = arg0 & arg1;
3200 break;
3202 case IOR:
3203 val = arg0 | arg1;
3204 break;
3206 case XOR:
3207 val = arg0 ^ arg1;
3208 break;
3210 case LSHIFTRT:
3211 case ASHIFT:
3212 case ASHIFTRT:
3213 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3214 the value is in range. We can't return any old value for
3215 out-of-range arguments because either the middle-end (via
3216 shift_truncation_mask) or the back-end might be relying on
3217 target-specific knowledge. Nor can we rely on
3218 shift_truncation_mask, since the shift might not be part of an
3219 ashlM3, lshrM3 or ashrM3 instruction. */
3220 if (SHIFT_COUNT_TRUNCATED)
3221 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3222 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3223 return 0;
3225 val = (code == ASHIFT
3226 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3227 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3229 /* Sign-extend the result for arithmetic right shifts. */
3230 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3231 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3232 break;
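/* For example, with width == 8, arg0 == 0xf0 (arg0s == -16) and
   arg1 == 2, the logical shift gives 0x3c; OR-ing in
   ((HOST_WIDE_INT) -1) << 6, i.e. 0xc0 in the low byte, restores
   the sign bits and yields 0xfc == -4, matching -16 >> 2.  */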
3234 case ROTATERT:
3235 if (arg1 < 0)
3236 return 0;
3238 arg1 %= width;
3239 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3240 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3241 break;
3243 case ROTATE:
3244 if (arg1 < 0)
3245 return 0;
3247 arg1 %= width;
3248 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3249 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3250 break;
3252 case COMPARE:
3253 /* Do nothing here. */
3254 return 0;
3256 case SMIN:
3257 val = arg0s <= arg1s ? arg0s : arg1s;
3258 break;
3260 case UMIN:
3261 val = ((unsigned HOST_WIDE_INT) arg0
3262 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3263 break;
3265 case SMAX:
3266 val = arg0s > arg1s ? arg0s : arg1s;
3267 break;
3269 case UMAX:
3270 val = ((unsigned HOST_WIDE_INT) arg0
3271 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3272 break;
3274 case SS_PLUS:
3275 case US_PLUS:
3276 case SS_MINUS:
3277 case US_MINUS:
3278 case SS_ASHIFT:
3279 /* ??? There are simplifications that can be done. */
3280 return 0;
3282 default:
3283 gcc_unreachable ();
3286 return gen_int_mode (val, mode);
3289 return NULL_RTX;
3294 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3295 PLUS or MINUS.
3297 Rather than test for specific cases, we do this by a brute-force method
3298 and do all possible simplifications until no more changes occur. Then
3299 we rebuild the operation. */
3301 struct simplify_plus_minus_op_data
3303 rtx op;
3304 short neg;
3307 static int
3308 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3310 const struct simplify_plus_minus_op_data *d1 = p1;
3311 const struct simplify_plus_minus_op_data *d2 = p2;
3312 int result;
3314 result = (commutative_operand_precedence (d2->op)
3315 - commutative_operand_precedence (d1->op));
3316 if (result)
3317 return result;
3319 /* Group together equal REGs to do more simplification. */
3320 if (REG_P (d1->op) && REG_P (d2->op))
3321 return REGNO (d1->op) - REGNO (d2->op);
3322 else
3323 return 0;
3326 static rtx
3327 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3328 rtx op1)
3330 struct simplify_plus_minus_op_data ops[8];
3331 rtx result, tem;
3332 int n_ops = 2, input_ops = 2;
3333 int changed, n_constants = 0, canonicalized = 0;
3334 int i, j;
3336 memset (ops, 0, sizeof ops);
3338 /* Set up the two operands and then expand them until nothing has been
3339 changed. If we run out of room in our array, give up; this should
3340 almost never happen. */
3342 ops[0].op = op0;
3343 ops[0].neg = 0;
3344 ops[1].op = op1;
3345 ops[1].neg = (code == MINUS);
3347 do
3349 changed = 0;
3351 for (i = 0; i < n_ops; i++)
3353 rtx this_op = ops[i].op;
3354 int this_neg = ops[i].neg;
3355 enum rtx_code this_code = GET_CODE (this_op);
3357 switch (this_code)
3359 case PLUS:
3360 case MINUS:
3361 if (n_ops == 7)
3362 return NULL_RTX;
3364 ops[n_ops].op = XEXP (this_op, 1);
3365 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3366 n_ops++;
3368 ops[i].op = XEXP (this_op, 0);
3369 input_ops++;
3370 changed = 1;
3371 canonicalized |= this_neg;
3372 break;
3374 case NEG:
3375 ops[i].op = XEXP (this_op, 0);
3376 ops[i].neg = ! this_neg;
3377 changed = 1;
3378 canonicalized = 1;
3379 break;
3381 case CONST:
3382 if (n_ops < 7
3383 && GET_CODE (XEXP (this_op, 0)) == PLUS
3384 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3385 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3387 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3388 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3389 ops[n_ops].neg = this_neg;
3390 n_ops++;
3391 changed = 1;
3392 canonicalized = 1;
3394 break;
3396 case NOT:
3397 /* ~a -> (-a - 1) */
3398 if (n_ops != 7)
3400 ops[n_ops].op = constm1_rtx;
3401 ops[n_ops++].neg = this_neg;
3402 ops[i].op = XEXP (this_op, 0);
3403 ops[i].neg = !this_neg;
3404 changed = 1;
3405 canonicalized = 1;
3407 break;
3409 case CONST_INT:
3410 n_constants++;
3411 if (this_neg)
3413 ops[i].op = neg_const_int (mode, this_op);
3414 ops[i].neg = 0;
3415 changed = 1;
3416 canonicalized = 1;
3418 break;
3420 default:
3421 break;
3425 while (changed);
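/* Expansion example: for (minus a (plus b c)) the loop above
   flattens the two initial entries into { a, -b, -c }, each
   operand paired with its negation flag; the pairwise
   simplification loop below then works on that flat list.  */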
3427 if (n_constants > 1)
3428 canonicalized = 1;
3430 gcc_assert (n_ops >= 2);
3432 /* If we only have two operands, we can avoid the loops. */
3433 if (n_ops == 2)
3435 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3436 rtx lhs, rhs;
3438 /* Get the two operands. Be careful with the order, especially for
3439 the cases where code == MINUS. */
3440 if (ops[0].neg && ops[1].neg)
3442 lhs = gen_rtx_NEG (mode, ops[0].op);
3443 rhs = ops[1].op;
3445 else if (ops[0].neg)
3447 lhs = ops[1].op;
3448 rhs = ops[0].op;
3450 else
3452 lhs = ops[0].op;
3453 rhs = ops[1].op;
3456 return simplify_const_binary_operation (code, mode, lhs, rhs);
3459 /* Now simplify each pair of operands until nothing changes. */
3460 do
3462 /* Insertion sort is good enough for an eight-element array. */
3463 for (i = 1; i < n_ops; i++)
3465 struct simplify_plus_minus_op_data save;
3466 j = i - 1;
3467 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3468 continue;
3470 canonicalized = 1;
3471 save = ops[i];
3472 do
3473 ops[j + 1] = ops[j];
3474 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3475 ops[j + 1] = save;
3478 /* This is only useful the first time through. */
3479 if (!canonicalized)
3480 return NULL_RTX;
3482 changed = 0;
3483 for (i = n_ops - 1; i > 0; i--)
3484 for (j = i - 1; j >= 0; j--)
3486 rtx lhs = ops[j].op, rhs = ops[i].op;
3487 int lneg = ops[j].neg, rneg = ops[i].neg;
3489 if (lhs != 0 && rhs != 0)
3491 enum rtx_code ncode = PLUS;
3493 if (lneg != rneg)
3495 ncode = MINUS;
3496 if (lneg)
3497 tem = lhs, lhs = rhs, rhs = tem;
3499 else if (swap_commutative_operands_p (lhs, rhs))
3500 tem = lhs, lhs = rhs, rhs = tem;
3502 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3503 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3505 rtx tem_lhs, tem_rhs;
3507 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3508 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3509 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3511 if (tem && !CONSTANT_P (tem))
3512 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3514 else
3515 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3517 /* Reject "simplifications" that just wrap the two
3518 arguments in a CONST. Failure to do so can result
3519 in infinite recursion with simplify_binary_operation
3520 when it calls us to simplify CONST operations. */
3521 if (tem
3522 && ! (GET_CODE (tem) == CONST
3523 && GET_CODE (XEXP (tem, 0)) == ncode
3524 && XEXP (XEXP (tem, 0), 0) == lhs
3525 && XEXP (XEXP (tem, 0), 1) == rhs))
3527 lneg &= rneg;
3528 if (GET_CODE (tem) == NEG)
3529 tem = XEXP (tem, 0), lneg = !lneg;
3530 if (GET_CODE (tem) == CONST_INT && lneg)
3531 tem = neg_const_int (mode, tem), lneg = 0;
3533 ops[i].op = tem;
3534 ops[i].neg = lneg;
3535 ops[j].op = NULL_RTX;
3536 changed = 1;
3541 /* Pack all the operands to the lower-numbered entries. */
3542 for (i = 0, j = 0; j < n_ops; j++)
3543 if (ops[j].op)
3545 ops[i] = ops[j];
3546 i++;
3548 n_ops = i;
3550 while (changed);
3552 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3553 if (n_ops == 2
3554 && GET_CODE (ops[1].op) == CONST_INT
3555 && CONSTANT_P (ops[0].op)
3556 && ops[0].neg)
3557 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3559 /* We suppressed creation of trivial CONST expressions in the
3560 combination loop to avoid recursion. Create one manually now.
3561 The combination loop should have ensured that there is exactly
3562 one CONST_INT, and the sort will have ensured that it is last
3563 in the array and that any other constant will be next-to-last. */
3565 if (n_ops > 1
3566 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3567 && CONSTANT_P (ops[n_ops - 2].op))
3569 rtx value = ops[n_ops - 1].op;
3570 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3571 value = neg_const_int (mode, value);
3572 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3573 n_ops--;
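/* For instance, a trailing pair { (symbol_ref s), (const_int 12) }
   whose negation flags differ collapses via plus_constant into
   (const (plus (symbol_ref s) (const_int -12))), and n_ops drops
   by one.  */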
3576 /* Put a non-negated operand first, if possible. */
3578 for (i = 0; i < n_ops && ops[i].neg; i++)
3579 continue;
3580 if (i == n_ops)
3581 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3582 else if (i != 0)
3584 tem = ops[0].op;
3585 ops[0] = ops[i];
3586 ops[i].op = tem;
3587 ops[i].neg = 1;
3590 /* Now make the result by performing the requested operations. */
3591 result = ops[0].op;
3592 for (i = 1; i < n_ops; i++)
3593 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3594 mode, result, ops[i].op);
3596 return result;
3599 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3600 static bool
3601 plus_minus_operand_p (rtx x)
3603 return GET_CODE (x) == PLUS
3604 || GET_CODE (x) == MINUS
3605 || (GET_CODE (x) == CONST
3606 && GET_CODE (XEXP (x, 0)) == PLUS
3607 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3608 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3611 /* Like simplify_binary_operation except used for relational operators.
3612 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3613 not both be VOIDmode.
3615 CMP_MODE specifies the mode in which the comparison is done, so it is
3616 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3617 the operands or, if both are VOIDmode, the operands are compared in
3618 "infinite precision". */
3619 rtx
3620 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3621 enum machine_mode cmp_mode, rtx op0, rtx op1)
3623 rtx tem, trueop0, trueop1;
3625 if (cmp_mode == VOIDmode)
3626 cmp_mode = GET_MODE (op0);
3627 if (cmp_mode == VOIDmode)
3628 cmp_mode = GET_MODE (op1);
3630 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3631 if (tem)
3633 if (SCALAR_FLOAT_MODE_P (mode))
3635 if (tem == const0_rtx)
3636 return CONST0_RTX (mode);
3637 #ifdef FLOAT_STORE_FLAG_VALUE
3639 REAL_VALUE_TYPE val;
3640 val = FLOAT_STORE_FLAG_VALUE (mode);
3641 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3643 #else
3644 return NULL_RTX;
3645 #endif
3647 if (VECTOR_MODE_P (mode))
3649 if (tem == const0_rtx)
3650 return CONST0_RTX (mode);
3651 #ifdef VECTOR_STORE_FLAG_VALUE
3653 int i, units;
3654 rtvec v;
3656 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3657 if (val == NULL_RTX)
3658 return NULL_RTX;
3659 if (val == const1_rtx)
3660 return CONST1_RTX (mode);
3662 units = GET_MODE_NUNITS (mode);
3663 v = rtvec_alloc (units);
3664 for (i = 0; i < units; i++)
3665 RTVEC_ELT (v, i) = val;
3666 return gen_rtx_raw_CONST_VECTOR (mode, v);
3668 #else
3669 return NULL_RTX;
3670 #endif
3673 return tem;
3676 /* For the following tests, ensure const0_rtx is op1. */
3677 if (swap_commutative_operands_p (op0, op1)
3678 || (op0 == const0_rtx && op1 != const0_rtx))
3679 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3681 /* If op0 is a compare, extract the comparison arguments from it. */
3682 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3683 return simplify_relational_operation (code, mode, VOIDmode,
3684 XEXP (op0, 0), XEXP (op0, 1));
3686 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3687 || CC0_P (op0))
3688 return NULL_RTX;
3690 trueop0 = avoid_constant_pool_reference (op0);
3691 trueop1 = avoid_constant_pool_reference (op1);
3692 return simplify_relational_operation_1 (code, mode, cmp_mode,
3693 trueop0, trueop1);
3696 /* This part of simplify_relational_operation is only used when CMP_MODE
3697 is not in class MODE_CC (i.e. it is a real comparison).
3699 MODE is the mode of the result, while CMP_MODE specifies the mode
3700 in which the comparison is done, so it is the mode of the operands. */
3702 static rtx
3703 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3704 enum machine_mode cmp_mode, rtx op0, rtx op1)
3706 enum rtx_code op0code = GET_CODE (op0);
3708 if (op1 == const0_rtx && COMPARISON_P (op0))
3710 /* If op0 is a comparison, extract the comparison arguments
3711 from it. */
3712 if (code == NE)
3714 if (GET_MODE (op0) == mode)
3715 return simplify_rtx (op0);
3716 else
3717 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3718 XEXP (op0, 0), XEXP (op0, 1));
3720 else if (code == EQ)
3722 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3723 if (new_code != UNKNOWN)
3724 return simplify_gen_relational (new_code, mode, VOIDmode,
3725 XEXP (op0, 0), XEXP (op0, 1));
3729 if (op1 == const0_rtx)
3731 /* Canonicalize (GTU x 0) as (NE x 0). */
3732 if (code == GTU)
3733 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3734 /* Canonicalize (LEU x 0) as (EQ x 0). */
3735 if (code == LEU)
3736 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3738 else if (op1 == const1_rtx)
3740 switch (code)
3742 case GE:
3743 /* Canonicalize (GE x 1) as (GT x 0). */
3744 return simplify_gen_relational (GT, mode, cmp_mode,
3745 op0, const0_rtx);
3746 case GEU:
3747 /* Canonicalize (GEU x 1) as (NE x 0). */
3748 return simplify_gen_relational (NE, mode, cmp_mode,
3749 op0, const0_rtx);
3750 case LT:
3751 /* Canonicalize (LT x 1) as (LE x 0). */
3752 return simplify_gen_relational (LE, mode, cmp_mode,
3753 op0, const0_rtx);
3754 case LTU:
3755 /* Canonicalize (LTU x 1) as (EQ x 0). */
3756 return simplify_gen_relational (EQ, mode, cmp_mode,
3757 op0, const0_rtx);
3758 default:
3759 break;
3762 else if (op1 == constm1_rtx)
3764 /* Canonicalize (LE x -1) as (LT x 0). */
3765 if (code == LE)
3766 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3767 /* Canonicalize (GT x -1) as (GE x 0). */
3768 if (code == GT)
3769 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3772 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3773 if ((code == EQ || code == NE)
3774 && (op0code == PLUS || op0code == MINUS)
3775 && CONSTANT_P (op1)
3776 && CONSTANT_P (XEXP (op0, 1))
3777 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3779 rtx x = XEXP (op0, 0);
3780 rtx c = XEXP (op0, 1);
3782 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3783 cmp_mode, op1, c);
3784 return simplify_gen_relational (code, mode, cmp_mode, x, c);
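/* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)): the constant is moved across the
   comparison by computing 7 - 3 with simplify_gen_binary.  */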
3787 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3788 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3789 if (code == NE
3790 && op1 == const0_rtx
3791 && GET_MODE_CLASS (mode) == MODE_INT
3792 && cmp_mode != VOIDmode
3793 /* ??? Work-around BImode bugs in the ia64 backend. */
3794 && mode != BImode
3795 && cmp_mode != BImode
3796 && nonzero_bits (op0, cmp_mode) == 1
3797 && STORE_FLAG_VALUE == 1)
3798 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3799 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3800 : lowpart_subreg (mode, op0, cmp_mode);
3802 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3803 if ((code == EQ || code == NE)
3804 && op1 == const0_rtx
3805 && op0code == XOR)
3806 return simplify_gen_relational (code, mode, cmp_mode,
3807 XEXP (op0, 0), XEXP (op0, 1));
3809 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3810 if ((code == EQ || code == NE)
3811 && op0code == XOR
3812 && rtx_equal_p (XEXP (op0, 0), op1)
3813 && !side_effects_p (XEXP (op0, 0)))
3814 return simplify_gen_relational (code, mode, cmp_mode,
3815 XEXP (op0, 1), const0_rtx);
3817 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3818 if ((code == EQ || code == NE)
3819 && op0code == XOR
3820 && rtx_equal_p (XEXP (op0, 1), op1)
3821 && !side_effects_p (XEXP (op0, 1)))
3822 return simplify_gen_relational (code, mode, cmp_mode,
3823 XEXP (op0, 0), const0_rtx);
3825 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3826 if ((code == EQ || code == NE)
3827 && op0code == XOR
3828 && (GET_CODE (op1) == CONST_INT
3829 || GET_CODE (op1) == CONST_DOUBLE)
3830 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3831 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3832 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3833 simplify_gen_binary (XOR, cmp_mode,
3834 XEXP (op0, 1), op1));
3836 if (op0code == POPCOUNT && op1 == const0_rtx)
3837 switch (code)
3839 case EQ:
3840 case LE:
3841 case LEU:
3842 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3843 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3844 XEXP (op0, 0), const0_rtx);
3846 case NE:
3847 case GT:
3848 case GTU:
3849 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3850 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3851 XEXP (op0, 0), const0_rtx);
3853 default:
3854 break;
3857 return NULL_RTX;
3860 /* Check if the given comparison (done in the given MODE) is actually a
3861 tautology or a contradiction.
3862 If no simplification is possible, this function returns zero.
3863 Otherwise, it returns either const_true_rtx or const0_rtx. */
3865 rtx
3866 simplify_const_relational_operation (enum rtx_code code,
3867 enum machine_mode mode,
3868 rtx op0, rtx op1)
3870 int equal, op0lt, op0ltu, op1lt, op1ltu;
3871 rtx tem;
3872 rtx trueop0;
3873 rtx trueop1;
3875 gcc_assert (mode != VOIDmode
3876 || (GET_MODE (op0) == VOIDmode
3877 && GET_MODE (op1) == VOIDmode));
3879 /* If op0 is a compare, extract the comparison arguments from it. */
3880 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3882 op1 = XEXP (op0, 1);
3883 op0 = XEXP (op0, 0);
3885 if (GET_MODE (op0) != VOIDmode)
3886 mode = GET_MODE (op0);
3887 else if (GET_MODE (op1) != VOIDmode)
3888 mode = GET_MODE (op1);
3889 else
3890 return 0;
3893 /* We can't simplify MODE_CC values since we don't know what the
3894 actual comparison is. */
3895 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3896 return 0;
3898 /* Make sure the constant is second. */
3899 if (swap_commutative_operands_p (op0, op1))
3901 tem = op0, op0 = op1, op1 = tem;
3902 code = swap_condition (code);
3905 trueop0 = avoid_constant_pool_reference (op0);
3906 trueop1 = avoid_constant_pool_reference (op1);
3908 /* For integer comparisons of A and B, maybe we can simplify A - B and
3909 then simplify a comparison of that with zero. If A and B are both either
3910 a register or a CONST_INT, this can't help; testing for these cases will
3911 prevent infinite recursion here and speed things up.
3913 We can only do this for EQ and NE comparisons, as otherwise we may
3914 lose or introduce overflow that we cannot disregard as undefined,
3915 since we do not know the signedness of the operation on either the
3916 left or the right hand side of the comparison. */
3918 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3919 && (code == EQ || code == NE)
3920 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3921 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3922 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3923 /* We cannot do this if tem is a nonzero address. */
3924 && ! nonzero_address_p (tem))
3925 return simplify_const_relational_operation (signed_condition (code),
3926 mode, tem, const0_rtx);
3928 if (! HONOR_NANS (mode) && code == ORDERED)
3929 return const_true_rtx;
3931 if (! HONOR_NANS (mode) && code == UNORDERED)
3932 return const0_rtx;
3934 /* For modes without NaNs, if the two operands are equal, we know the
3935 result except if they have side-effects. */
3936 if (! HONOR_NANS (GET_MODE (trueop0))
3937 && rtx_equal_p (trueop0, trueop1)
3938 && ! side_effects_p (trueop0))
3939 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3941 /* If the operands are floating-point constants, see if we can fold
3942 the result. */
3943 else if (GET_CODE (trueop0) == CONST_DOUBLE
3944 && GET_CODE (trueop1) == CONST_DOUBLE
3945 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3947 REAL_VALUE_TYPE d0, d1;
3949 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3950 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3952 /* Comparisons are unordered iff at least one of the values is NaN. */
3953 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3954 switch (code)
3956 case UNEQ:
3957 case UNLT:
3958 case UNGT:
3959 case UNLE:
3960 case UNGE:
3961 case NE:
3962 case UNORDERED:
3963 return const_true_rtx;
3964 case EQ:
3965 case LT:
3966 case GT:
3967 case LE:
3968 case GE:
3969 case LTGT:
3970 case ORDERED:
3971 return const0_rtx;
3972 default:
3973 return 0;
3976 equal = REAL_VALUES_EQUAL (d0, d1);
3977 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3978 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3981 /* Otherwise, see if the operands are both integers. */
3982 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3983 && (GET_CODE (trueop0) == CONST_DOUBLE
3984 || GET_CODE (trueop0) == CONST_INT)
3985 && (GET_CODE (trueop1) == CONST_DOUBLE
3986 || GET_CODE (trueop1) == CONST_INT))
3988 int width = GET_MODE_BITSIZE (mode);
3989 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3990 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3992 /* Get the two words comprising each integer constant. */
3993 if (GET_CODE (trueop0) == CONST_DOUBLE)
3995 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3996 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3998 else
4000 l0u = l0s = INTVAL (trueop0);
4001 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4004 if (GET_CODE (trueop1) == CONST_DOUBLE)
4006 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4007 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4009 else
4011 l1u = l1s = INTVAL (trueop1);
4012 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4015 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4016 we have to sign or zero-extend the values. */
4017 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4019 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4020 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4022 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4023 l0s |= ((HOST_WIDE_INT) (-1) << width);
4025 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4026 l1s |= ((HOST_WIDE_INT) (-1) << width);
4028 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4029 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4031 equal = (h0u == h1u && l0u == l1u);
4032 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4033 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4034 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4035 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
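/* Each constant is viewed as the two's complement pair (high, low),
   i.e. high * 2**HOST_BITS_PER_WIDE_INT + low.  With a 64-bit
   HOST_WIDE_INT, for instance, -1 unpacks to (-1, ~0) and compares
   signed-less-than (0, 0) because its high word is negative, while
   unsigned it compares greater.  */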
4038 /* Otherwise, there are some code-specific tests we can make. */
4039 else
4041 /* Optimize comparisons with upper and lower bounds. */
4042 if (SCALAR_INT_MODE_P (mode)
4043 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4045 rtx mmin, mmax;
4046 int sign;
4048 if (code == GEU
4049 || code == LEU
4050 || code == GTU
4051 || code == LTU)
4052 sign = 0;
4053 else
4054 sign = 1;
4056 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4058 tem = NULL_RTX;
4059 switch (code)
4061 case GEU:
4062 case GE:
4063 /* x >= min is always true. */
4064 if (rtx_equal_p (trueop1, mmin))
4065 tem = const_true_rtx;
4066 else
4067 break;
4069 case LEU:
4070 case LE:
4071 /* x <= max is always true. */
4072 if (rtx_equal_p (trueop1, mmax))
4073 tem = const_true_rtx;
4074 break;
4076 case GTU:
4077 case GT:
4078 /* x > max is always false. */
4079 if (rtx_equal_p (trueop1, mmax))
4080 tem = const0_rtx;
4081 break;
4083 case LTU:
4084 case LT:
4085 /* x < min is always false. */
4086 if (rtx_equal_p (trueop1, mmin))
4087 tem = const0_rtx;
4088 break;
4090 default:
4091 break;
4093 if (tem == const0_rtx
4094 || tem == const_true_rtx)
4095 return tem;
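/* For example, with QImode and an unsigned code the bounds are
   mmin = 0 and mmax = 255, so (geu x 0) folds to const_true_rtx and
   (gtu x 255) folds to const0_rtx, whatever x is.  */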
4098 switch (code)
4100 case EQ:
4101 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4102 return const0_rtx;
4103 break;
4105 case NE:
4106 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4107 return const_true_rtx;
4108 break;
4110 case LT:
4111 /* Optimize abs(x) < 0.0. */
4112 if (trueop1 == CONST0_RTX (mode)
4113 && !HONOR_SNANS (mode)
4114 && (!INTEGRAL_MODE_P (mode)
4115 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4117 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4118 : trueop0;
4119 if (GET_CODE (tem) == ABS)
4121 if (INTEGRAL_MODE_P (mode)
4122 && (issue_strict_overflow_warning
4123 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4124 warning (OPT_Wstrict_overflow,
4125 ("assuming signed overflow does not occur when "
4126 "assuming abs (x) < 0 is false"));
4127 return const0_rtx;
4131 /* Optimize popcount (x) < 0, which is always false. */
4132 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4133 return const0_rtx;
4134 break;
4136 case GE:
4137 /* Optimize abs(x) >= 0.0. */
4138 if (trueop1 == CONST0_RTX (mode)
4139 && !HONOR_NANS (mode)
4140 && (!INTEGRAL_MODE_P (mode)
4141 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4143 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4144 : trueop0;
4145 if (GET_CODE (tem) == ABS)
4147 if (INTEGRAL_MODE_P (mode)
4148 && (issue_strict_overflow_warning
4149 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4150 warning (OPT_Wstrict_overflow,
4151 ("assuming signed overflow does not occur when "
4152 "assuming abs (x) >= 0 is true"));
4153 return const_true_rtx;
4157 /* Optimize popcount (x) >= 0. */
4158 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4159 return const_true_rtx;
4160 break;
4162 case UNGE:
4163 /* Optimize ! (abs(x) < 0.0). */
4164 if (trueop1 == CONST0_RTX (mode))
4166 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4167 : trueop0;
4168 if (GET_CODE (tem) == ABS)
4169 return const_true_rtx;
4171 break;
4173 default:
4174 break;
4177 return 0;
4180 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4181 as appropriate. */
4182 switch (code)
4184 case EQ:
4185 case UNEQ:
4186 return equal ? const_true_rtx : const0_rtx;
4187 case NE:
4188 case LTGT:
4189 return ! equal ? const_true_rtx : const0_rtx;
4190 case LT:
4191 case UNLT:
4192 return op0lt ? const_true_rtx : const0_rtx;
4193 case GT:
4194 case UNGT:
4195 return op1lt ? const_true_rtx : const0_rtx;
4196 case LTU:
4197 return op0ltu ? const_true_rtx : const0_rtx;
4198 case GTU:
4199 return op1ltu ? const_true_rtx : const0_rtx;
4200 case LE:
4201 case UNLE:
4202 return equal || op0lt ? const_true_rtx : const0_rtx;
4203 case GE:
4204 case UNGE:
4205 return equal || op1lt ? const_true_rtx : const0_rtx;
4206 case LEU:
4207 return equal || op0ltu ? const_true_rtx : const0_rtx;
4208 case GEU:
4209 return equal || op1ltu ? const_true_rtx : const0_rtx;
4210 case ORDERED:
4211 return const_true_rtx;
4212 case UNORDERED:
4213 return const0_rtx;
4214 default:
4215 gcc_unreachable ();
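/* A minimal usage sketch (hypothetical operands; the CONST_INTs are
   VOIDmode, so SImode must be supplied explicitly):

     rtx res = simplify_const_relational_operation (GTU, SImode,
                                                    GEN_INT (4),
                                                    GEN_INT (2));

   Here res is const_true_rtx, since 4 > 2 as unsigned values.  */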
4219 /* Simplify CODE, an operation with result mode MODE and three operands,
4220 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4221 a constant. Return 0 if no simplification is possible. */
4224 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4225 enum machine_mode op0_mode, rtx op0, rtx op1,
4226 rtx op2)
4228 unsigned int width = GET_MODE_BITSIZE (mode);
4230 /* VOIDmode means "infinite" precision. */
4231 if (width == 0)
4232 width = HOST_BITS_PER_WIDE_INT;
4234 switch (code)
4236 case SIGN_EXTRACT:
4237 case ZERO_EXTRACT:
4238 if (GET_CODE (op0) == CONST_INT
4239 && GET_CODE (op1) == CONST_INT
4240 && GET_CODE (op2) == CONST_INT
4241 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4242 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4244 /* Extracting a bit-field from a constant */
4245 HOST_WIDE_INT val = INTVAL (op0);
4247 if (BITS_BIG_ENDIAN)
4248 val >>= (GET_MODE_BITSIZE (op0_mode)
4249 - INTVAL (op2) - INTVAL (op1));
4250 else
4251 val >>= INTVAL (op2);
4253 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4255 /* First zero-extend. */
4256 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4257 /* If desired, propagate sign bit. */
4258 if (code == SIGN_EXTRACT
4259 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4260 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4263 /* Clear the bits that don't belong in our mode,
4264 unless they and our sign bit are all one.
4265 So we get either a reasonable negative value or a reasonable
4266 unsigned value for this mode. */
4267 if (width < HOST_BITS_PER_WIDE_INT
4268 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4269 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4270 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4272 return gen_int_mode (val, mode);
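/* Worked example (assuming !BITS_BIG_ENDIAN): extracting 4 bits at
   bit 1 from (const_int 0x5a) shifts right by 1 to get 0x2d, then
   masks with 0xf, yielding (const_int 0xd) for ZERO_EXTRACT; for
   SIGN_EXTRACT, bit 3 of the field is set, so the value sign-extends
   to (const_int -3).  */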
4274 break;
4276 case IF_THEN_ELSE:
4277 if (GET_CODE (op0) == CONST_INT)
4278 return op0 != const0_rtx ? op1 : op2;
4280 /* Convert c ? a : a into "a". */
4281 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4282 return op1;
4284 /* Convert a != b ? a : b into "a". */
4285 if (GET_CODE (op0) == NE
4286 && ! side_effects_p (op0)
4287 && ! HONOR_NANS (mode)
4288 && ! HONOR_SIGNED_ZEROS (mode)
4289 && ((rtx_equal_p (XEXP (op0, 0), op1)
4290 && rtx_equal_p (XEXP (op0, 1), op2))
4291 || (rtx_equal_p (XEXP (op0, 0), op2)
4292 && rtx_equal_p (XEXP (op0, 1), op1))))
4293 return op1;
4295 /* Convert a == b ? a : b into "b". */
4296 if (GET_CODE (op0) == EQ
4297 && ! side_effects_p (op0)
4298 && ! HONOR_NANS (mode)
4299 && ! HONOR_SIGNED_ZEROS (mode)
4300 && ((rtx_equal_p (XEXP (op0, 0), op1)
4301 && rtx_equal_p (XEXP (op0, 1), op2))
4302 || (rtx_equal_p (XEXP (op0, 0), op2)
4303 && rtx_equal_p (XEXP (op0, 1), op1))))
4304 return op2;
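/* The HONOR_SIGNED_ZEROS guard matters here: with op1 = -0.0 and
   op2 = +0.0 the EQ comparison is true, yet returning op2 would lose
   the sign of zero, so the fold must be suppressed for such modes.  */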
4306 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4308 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4309 ? GET_MODE (XEXP (op0, 1))
4310 : GET_MODE (XEXP (op0, 0)));
4311 rtx temp;
4313 /* Look for happy constants in op1 and op2. */
4314 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4316 HOST_WIDE_INT t = INTVAL (op1);
4317 HOST_WIDE_INT f = INTVAL (op2);
4319 if (t == STORE_FLAG_VALUE && f == 0)
4320 code = GET_CODE (op0);
4321 else if (t == 0 && f == STORE_FLAG_VALUE)
4323 enum rtx_code tmp;
4324 tmp = reversed_comparison_code (op0, NULL_RTX);
4325 if (tmp == UNKNOWN)
4326 break;
4327 code = tmp;
4329 else
4330 break;
4332 return simplify_gen_relational (code, mode, cmp_mode,
4333 XEXP (op0, 0), XEXP (op0, 1));
4336 if (cmp_mode == VOIDmode)
4337 cmp_mode = op0_mode;
4338 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4339 cmp_mode, XEXP (op0, 0),
4340 XEXP (op0, 1));
4342 /* See if any simplifications were possible. */
4343 if (temp)
4345 if (GET_CODE (temp) == CONST_INT)
4346 return temp == const0_rtx ? op2 : op1;
4347 else
4348 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4351 break;
4353 case VEC_MERGE:
4354 gcc_assert (GET_MODE (op0) == mode);
4355 gcc_assert (GET_MODE (op1) == mode);
4356 gcc_assert (VECTOR_MODE_P (mode));
4357 op2 = avoid_constant_pool_reference (op2);
4358 if (GET_CODE (op2) == CONST_INT)
4360 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4361 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4362 int mask = (1 << n_elts) - 1;
4364 if (!(INTVAL (op2) & mask))
4365 return op1;
4366 if ((INTVAL (op2) & mask) == mask)
4367 return op0;
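/* For instance, in V4SImode n_elts is 4 and mask is 0xf; an op2 of
   (const_int 5) (binary 0101) selects elements 0 and 2 from op0 and
   elements 1 and 3 from op1.  */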
4369 op0 = avoid_constant_pool_reference (op0);
4370 op1 = avoid_constant_pool_reference (op1);
4371 if (GET_CODE (op0) == CONST_VECTOR
4372 && GET_CODE (op1) == CONST_VECTOR)
4374 rtvec v = rtvec_alloc (n_elts);
4375 unsigned int i;
4377 for (i = 0; i < n_elts; i++)
4378 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4379 ? CONST_VECTOR_ELT (op0, i)
4380 : CONST_VECTOR_ELT (op1, i));
4381 return gen_rtx_CONST_VECTOR (mode, v);
4384 break;
4386 default:
4387 gcc_unreachable ();
4390 return 0;
4393 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4394 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4396 Works by unpacking OP into a collection of 8-bit values
4397 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4398 and then repacking them again for OUTERMODE. */
4400 static rtx
4401 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4402 enum machine_mode innermode, unsigned int byte)
4404 /* We support up to 512-bit values (for V8DFmode). */
4405 enum {
4406 max_bitsize = 512,
4407 value_bit = 8,
4408 value_mask = (1 << value_bit) - 1
4410 unsigned char value[max_bitsize / value_bit];
4411 int value_start;
4412 int i;
4413 int elem;
4415 int num_elem;
4416 rtx * elems;
4417 int elem_bitsize;
4418 rtx result_s;
4419 rtvec result_v = NULL;
4420 enum mode_class outer_class;
4421 enum machine_mode outer_submode;
4423 /* Some ports misuse CCmode. */
4424 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4425 return op;
4427 /* We have no way to represent a complex constant at the rtl level. */
4428 if (COMPLEX_MODE_P (outermode))
4429 return NULL_RTX;
4431 /* Unpack the value. */
4433 if (GET_CODE (op) == CONST_VECTOR)
4435 num_elem = CONST_VECTOR_NUNITS (op);
4436 elems = &CONST_VECTOR_ELT (op, 0);
4437 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4439 else
4441 num_elem = 1;
4442 elems = &op;
4443 elem_bitsize = max_bitsize;
4445 /* If this asserts, it is too complicated; reducing value_bit may help. */
4446 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4447 /* I don't know how to handle endianness of sub-units. */
4448 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4450 for (elem = 0; elem < num_elem; elem++)
4452 unsigned char * vp;
4453 rtx el = elems[elem];
4455 /* Vectors are kept in target memory order. (This is probably
4456 a mistake.) */
4458 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4459 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4460 / BITS_PER_UNIT);
4461 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4462 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4463 unsigned bytele = (subword_byte % UNITS_PER_WORD
4464 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4465 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4468 switch (GET_CODE (el))
4470 case CONST_INT:
4471 for (i = 0;
4472 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4473 i += value_bit)
4474 *vp++ = INTVAL (el) >> i;
4475 /* CONST_INTs are always logically sign-extended. */
4476 for (; i < elem_bitsize; i += value_bit)
4477 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4478 break;
4480 case CONST_DOUBLE:
4481 if (GET_MODE (el) == VOIDmode)
4483 /* If this triggers, someone should have generated a
4484 CONST_INT instead. */
4485 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4487 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4488 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4489 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4491 *vp++
4492 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4493 i += value_bit;
4495 /* It shouldn't matter what's done here, so fill it with
4496 zero. */
4497 for (; i < elem_bitsize; i += value_bit)
4498 *vp++ = 0;
4500 else
4502 long tmp[max_bitsize / 32];
4503 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4505 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4506 gcc_assert (bitsize <= elem_bitsize);
4507 gcc_assert (bitsize % value_bit == 0);
4509 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4510 GET_MODE (el));
4512 /* real_to_target produces its result in words affected by
4513 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4514 and use WORDS_BIG_ENDIAN instead; see the documentation
4515 of SUBREG in rtl.texi. */
4516 for (i = 0; i < bitsize; i += value_bit)
4518 int ibase;
4519 if (WORDS_BIG_ENDIAN)
4520 ibase = bitsize - 1 - i;
4521 else
4522 ibase = i;
4523 *vp++ = tmp[ibase / 32] >> i % 32;
4526 /* It shouldn't matter what's done here, so fill it with
4527 zero. */
4528 for (; i < elem_bitsize; i += value_bit)
4529 *vp++ = 0;
4531 break;
4533 default:
4534 gcc_unreachable ();
4538 /* Now, pick the right byte to start with. */
4539 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4540 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4541 will already have offset 0. */
4542 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4544 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4545 - byte);
4546 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4547 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4548 byte = (subword_byte % UNITS_PER_WORD
4549 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
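/* Example: a HImode subreg at byte 6 of a DImode constant on a
   big-endian target gets ibyte = 8 - 2 - 6 = 0, i.e. after the
   renumbering we read the least-significant bytes; on a little-endian
   target BYTE is left unchanged.  */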
4552 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4553 so if it's become negative it will instead be very large.) */
4554 gcc_assert (byte < GET_MODE_SIZE (innermode));
4556 /* Convert from bytes to chunks of size value_bit. */
4557 value_start = byte * (BITS_PER_UNIT / value_bit);
4559 /* Re-pack the value. */
4561 if (VECTOR_MODE_P (outermode))
4563 num_elem = GET_MODE_NUNITS (outermode);
4564 result_v = rtvec_alloc (num_elem);
4565 elems = &RTVEC_ELT (result_v, 0);
4566 outer_submode = GET_MODE_INNER (outermode);
4568 else
4570 num_elem = 1;
4571 elems = &result_s;
4572 outer_submode = outermode;
4575 outer_class = GET_MODE_CLASS (outer_submode);
4576 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4578 gcc_assert (elem_bitsize % value_bit == 0);
4579 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4581 for (elem = 0; elem < num_elem; elem++)
4583 unsigned char *vp;
4585 /* Vectors are stored in target memory order. (This is probably
4586 a mistake.) */
4588 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4589 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4590 / BITS_PER_UNIT);
4591 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4592 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4593 unsigned bytele = (subword_byte % UNITS_PER_WORD
4594 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4595 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4598 switch (outer_class)
4600 case MODE_INT:
4601 case MODE_PARTIAL_INT:
4603 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4605 for (i = 0;
4606 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4607 i += value_bit)
4608 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4609 for (; i < elem_bitsize; i += value_bit)
4610 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4611 << (i - HOST_BITS_PER_WIDE_INT));
4613 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4614 know why. */
4615 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4616 elems[elem] = gen_int_mode (lo, outer_submode);
4617 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4618 elems[elem] = immed_double_const (lo, hi, outer_submode);
4619 else
4620 return NULL_RTX;
4622 break;
4624 case MODE_FLOAT:
4625 case MODE_DECIMAL_FLOAT:
4627 REAL_VALUE_TYPE r;
4628 long tmp[max_bitsize / 32];
4630 /* real_from_target wants its input in words affected by
4631 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4632 and use WORDS_BIG_ENDIAN instead; see the documentation
4633 of SUBREG in rtl.texi. */
4634 for (i = 0; i < max_bitsize / 32; i++)
4635 tmp[i] = 0;
4636 for (i = 0; i < elem_bitsize; i += value_bit)
4638 int ibase;
4639 if (WORDS_BIG_ENDIAN)
4640 ibase = elem_bitsize - 1 - i;
4641 else
4642 ibase = i;
4643 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4646 real_from_target (&r, tmp, outer_submode);
4647 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4649 break;
4651 default:
4652 gcc_unreachable ();
4655 if (VECTOR_MODE_P (outermode))
4656 return gen_rtx_CONST_VECTOR (outermode, result_v);
4657 else
4658 return result_s;
4661 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4662 Return 0 if no simplifications are possible. */
4664 simplify_subreg (enum machine_mode outermode, rtx op,
4665 enum machine_mode innermode, unsigned int byte)
4667 /* Little bit of sanity checking. */
4668 gcc_assert (innermode != VOIDmode);
4669 gcc_assert (outermode != VOIDmode);
4670 gcc_assert (innermode != BLKmode);
4671 gcc_assert (outermode != BLKmode);
4673 gcc_assert (GET_MODE (op) == innermode
4674 || GET_MODE (op) == VOIDmode);
4676 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4677 gcc_assert (byte < GET_MODE_SIZE (innermode));
4679 if (outermode == innermode && !byte)
4680 return op;
4682 if (GET_CODE (op) == CONST_INT
4683 || GET_CODE (op) == CONST_DOUBLE
4684 || GET_CODE (op) == CONST_VECTOR)
4685 return simplify_immed_subreg (outermode, op, innermode, byte);
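/* For example, with INNERMODE == HImode on a little-endian target,
   (subreg:QI (const_int 0x1234) 0) unpacks the constant into the byte
   array {0x34, 0x12} and repacks byte 0 as (const_int 0x34).  */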
4687 /* Changing mode twice with SUBREG => just change it once,
4688 or not at all if changing back to op's starting mode. */
4689 if (GET_CODE (op) == SUBREG)
4691 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4692 int final_offset = byte + SUBREG_BYTE (op);
4693 rtx newx;
4695 if (outermode == innermostmode
4696 && byte == 0 && SUBREG_BYTE (op) == 0)
4697 return SUBREG_REG (op);
4699 /* The SUBREG_BYTE represents the offset, as if the value were stored
4700 in memory. An irritating exception is the paradoxical subreg, where
4701 we define SUBREG_BYTE to be 0; on big-endian machines this value
4702 would otherwise be negative. For a moment, undo this exception. */
4703 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4705 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4706 if (WORDS_BIG_ENDIAN)
4707 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4708 if (BYTES_BIG_ENDIAN)
4709 final_offset += difference % UNITS_PER_WORD;
4711 if (SUBREG_BYTE (op) == 0
4712 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4714 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4715 if (WORDS_BIG_ENDIAN)
4716 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4717 if (BYTES_BIG_ENDIAN)
4718 final_offset += difference % UNITS_PER_WORD;
4721 /* See whether resulting subreg will be paradoxical. */
4722 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4724 /* In nonparadoxical subregs we can't handle negative offsets. */
4725 if (final_offset < 0)
4726 return NULL_RTX;
4727 /* Bail out in case resulting subreg would be incorrect. */
4728 if (final_offset % GET_MODE_SIZE (outermode)
4729 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4730 return NULL_RTX;
4732 else
4734 int offset = 0;
4735 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4737 /* In a paradoxical subreg, see if we are still looking at the lower part.
4738 If so, our SUBREG_BYTE will be 0. */
4739 if (WORDS_BIG_ENDIAN)
4740 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4741 if (BYTES_BIG_ENDIAN)
4742 offset += difference % UNITS_PER_WORD;
4743 if (offset == final_offset)
4744 final_offset = 0;
4745 else
4746 return NULL_RTX;
4749 /* Recurse for further possible simplifications. */
4750 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4751 final_offset);
4752 if (newx)
4753 return newx;
4754 if (validate_subreg (outermode, innermostmode,
4755 SUBREG_REG (op), final_offset))
4756 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4757 return NULL_RTX;
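/* Illustrative fold: (subreg:QI (subreg:HI (reg:SI) 0) 0) recurses
   with the summed offset 0 and becomes (subreg:QI (reg:SI) 0), a
   single SUBREG instead of two.  */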
4760 /* Merge implicit and explicit truncations. */
4762 if (GET_CODE (op) == TRUNCATE
4763 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4764 && subreg_lowpart_offset (outermode, innermode) == byte)
4765 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4766 GET_MODE (XEXP (op, 0)));
4768 /* SUBREG of a hard register => just change the register number
4769 and/or mode. If the hard register is not valid in that mode,
4770 suppress this simplification. If the hard register is the stack,
4771 frame, or argument pointer, leave this as a SUBREG. */
4773 if (REG_P (op)
4774 && REGNO (op) < FIRST_PSEUDO_REGISTER
4775 #ifdef CANNOT_CHANGE_MODE_CLASS
4776 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4777 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4778 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4779 #endif
4780 && ((reload_completed && !frame_pointer_needed)
4781 || (REGNO (op) != FRAME_POINTER_REGNUM
4782 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4783 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4784 #endif
4786 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4787 && REGNO (op) != ARG_POINTER_REGNUM
4788 #endif
4789 && REGNO (op) != STACK_POINTER_REGNUM
4790 && subreg_offset_representable_p (REGNO (op), innermode,
4791 byte, outermode))
4793 unsigned int regno = REGNO (op);
4794 unsigned int final_regno
4795 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4797 /* ??? We do allow it if the current REG is not valid for
4798 its mode. This is a kludge to work around how float/complex
4799 arguments are passed on 32-bit SPARC and should be fixed. */
4800 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4801 || ! HARD_REGNO_MODE_OK (regno, innermode))
4803 rtx x;
4804 int final_offset = byte;
4806 /* Adjust offset for paradoxical subregs. */
4807 if (byte == 0
4808 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4810 int difference = (GET_MODE_SIZE (innermode)
4811 - GET_MODE_SIZE (outermode));
4812 if (WORDS_BIG_ENDIAN)
4813 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4814 if (BYTES_BIG_ENDIAN)
4815 final_offset += difference % UNITS_PER_WORD;
4818 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4820 /* Propagate the original regno. We don't have any way to specify
4821 the offset inside the original regno, so do so only for the lowpart.
4822 The information is used only by alias analysis, which cannot
4823 grok partial registers anyway. */
4825 if (subreg_lowpart_offset (outermode, innermode) == byte)
4826 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4827 return x;
4831 /* If we have a SUBREG of a register that we are replacing and we are
4832 replacing it with a MEM, make a new MEM and try replacing the
4833 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4834 or if we would be widening it. */
4836 if (MEM_P (op)
4837 && ! mode_dependent_address_p (XEXP (op, 0))
4838 /* Allow splitting of volatile memory references in case we don't
4839 have an instruction to move the whole thing. */
4840 && (! MEM_VOLATILE_P (op)
4841 || ! have_insn_for (SET, innermode))
4842 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4843 return adjust_address_nv (op, outermode, byte);
4845 /* Handle complex values represented as CONCAT
4846 of real and imaginary part. */
4847 if (GET_CODE (op) == CONCAT)
4849 unsigned int part_size, final_offset;
4850 rtx part, res;
4852 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4853 if (byte < part_size)
4855 part = XEXP (op, 0);
4856 final_offset = byte;
4858 else
4860 part = XEXP (op, 1);
4861 final_offset = byte - part_size;
4864 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4865 return NULL_RTX;
4867 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4868 if (res)
4869 return res;
4870 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4871 return gen_rtx_SUBREG (outermode, part, final_offset);
4872 return NULL_RTX;
4875 /* Optimize SUBREG truncations of zero and sign extended values. */
4876 if ((GET_CODE (op) == ZERO_EXTEND
4877 || GET_CODE (op) == SIGN_EXTEND)
4878 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4880 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4882 /* If we're requesting the lowpart of a zero or sign extension,
4883 there are three possibilities. If the outermode is the same
4884 as the origmode, we can omit both the extension and the subreg.
4885 If the outermode is not larger than the origmode, we can apply
4886 the truncation without the extension. Finally, if the outermode
4887 is larger than the origmode, but both are integer modes, we
4888 can just extend to the appropriate mode. */
4889 if (bitpos == 0)
4891 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4892 if (outermode == origmode)
4893 return XEXP (op, 0);
4894 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4895 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4896 subreg_lowpart_offset (outermode,
4897 origmode));
4898 if (SCALAR_INT_MODE_P (outermode))
4899 return simplify_gen_unary (GET_CODE (op), outermode,
4900 XEXP (op, 0), origmode);
4903 /* A SUBREG resulting from a zero extension may fold to zero if
4904 it extracts higher bits than the ZERO_EXTEND's source provides. */
4905 if (GET_CODE (op) == ZERO_EXTEND
4906 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4907 return CONST0_RTX (outermode);
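/* Example: for (subreg:HI (zero_extend:SI (reg:QI)) 2) on a
   little-endian target, bitpos is 16, past the 8 bits the QImode
   source provides, so every selected bit comes from the extension and
   the whole subreg folds to (const_int 0).  */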
4910 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4911 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4912 the outer subreg is effectively a truncation to the original mode. */
4913 if ((GET_CODE (op) == LSHIFTRT
4914 || GET_CODE (op) == ASHIFTRT)
4915 && SCALAR_INT_MODE_P (outermode)
4916 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4917 to avoid the possibility that an outer LSHIFTRT shifts by more
4918 than the sign extension's sign_bit_copies and introduces zeros
4919 into the high bits of the result. */
4920 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4921 && GET_CODE (XEXP (op, 1)) == CONST_INT
4922 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4923 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4924 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4925 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4926 return simplify_gen_binary (ASHIFTRT, outermode,
4927 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
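/* Concrete instance: with C = 2,
   (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)) 0)
   becomes (ashiftrt:QI (x:QI) (const_int 2)); the low QImode bits of
   the wide logical shift coincide with an arithmetic shift of the
   original QImode value.  */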
4929 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4930 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4931 the outer subreg is effectively a truncation to the original mode. */
4932 if ((GET_CODE (op) == LSHIFTRT
4933 || GET_CODE (op) == ASHIFTRT)
4934 && SCALAR_INT_MODE_P (outermode)
4935 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4936 && GET_CODE (XEXP (op, 1)) == CONST_INT
4937 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4938 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4939 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4940 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4941 return simplify_gen_binary (LSHIFTRT, outermode,
4942 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4944 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4945 (ashift:QI (x:QI) C), where C is a suitable small constant and
4946 the outer subreg is effectively a truncation to the original mode. */
4947 if (GET_CODE (op) == ASHIFT
4948 && SCALAR_INT_MODE_P (outermode)
4949 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4950 && GET_CODE (XEXP (op, 1)) == CONST_INT
4951 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4952 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4953 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4954 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4955 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4956 return simplify_gen_binary (ASHIFT, outermode,
4957 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4959 return NULL_RTX;
4962 /* Make a SUBREG operation or equivalent if it folds. */
4965 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4966 enum machine_mode innermode, unsigned int byte)
4968 rtx newx;
4970 newx = simplify_subreg (outermode, op, innermode, byte);
4971 if (newx)
4972 return newx;
4974 if (GET_CODE (op) == SUBREG
4975 || GET_CODE (op) == CONCAT
4976 || GET_MODE (op) == VOIDmode)
4977 return NULL_RTX;
4979 if (validate_subreg (outermode, innermode, op, byte))
4980 return gen_rtx_SUBREG (outermode, op, byte);
4982 return NULL_RTX;
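/* A typical call (sketch; OP stands for any DImode rtx): requesting
   the low SImode word,

     simplify_gen_subreg (SImode, op, DImode,
                          subreg_lowpart_offset (SImode, DImode));

   either folds outright or yields a fresh (subreg:SI ...).  */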
4985 /* Simplify X, an rtx expression.
4987 Return the simplified expression or NULL if no simplifications
4988 were possible.
4990 This is the preferred entry point into the simplification routines;
4991 however, we still allow passes to call the more specific routines.
4993 Right now GCC has three (yes, three) major bodies of RTL simplification
4994 code that need to be unified.
4996 1. fold_rtx in cse.c. This code uses various CSE specific
4997 information to aid in RTL simplification.
4999 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5000 it uses combine specific information to aid in RTL
5001 simplification.
5003 3. The routines in this file.
5006 Long term we want to only have one body of simplification code; to
5007 get to that state I recommend the following steps:
5009 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5010 which are not pass dependent state into these routines.
5012 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5013 use this routine whenever possible.
5015 3. Allow for pass dependent state to be provided to these
5016 routines and add simplifications based on the pass dependent
5017 state. Remove code from cse.c & combine.c that becomes
5018 redundant/dead.
5020 It will take time, but ultimately the compiler will be easier to
5021 maintain and improve. It's totally silly that when we add a
5022 simplification it needs to be added to 4 places (3 for RTL
5023 simplification and 1 for tree simplification). */
5026 simplify_rtx (rtx x)
5028 enum rtx_code code = GET_CODE (x);
5029 enum machine_mode mode = GET_MODE (x);
5031 switch (GET_RTX_CLASS (code))
5033 case RTX_UNARY:
5034 return simplify_unary_operation (code, mode,
5035 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5036 case RTX_COMM_ARITH:
5037 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5038 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5040 /* Fall through.... */
5042 case RTX_BIN_ARITH:
5043 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5045 case RTX_TERNARY:
5046 case RTX_BITFIELD_OPS:
5047 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5048 XEXP (x, 0), XEXP (x, 1),
5049 XEXP (x, 2));
5051 case RTX_COMPARE:
5052 case RTX_COMM_COMPARE:
5053 return simplify_relational_operation (code, mode,
5054 ((GET_MODE (XEXP (x, 0))
5055 != VOIDmode)
5056 ? GET_MODE (XEXP (x, 0))
5057 : GET_MODE (XEXP (x, 1))),
5058 XEXP (x, 0),
5059 XEXP (x, 1));
5061 case RTX_EXTRA:
5062 if (code == SUBREG)
5063 return simplify_subreg (mode, SUBREG_REG (x),
5064 GET_MODE (SUBREG_REG (x)),
5065 SUBREG_BYTE (x));
5066 break;
5068 case RTX_OBJ:
5069 if (code == LO_SUM)
5071 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5072 if (GET_CODE (XEXP (x, 0)) == HIGH
5073 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5074 return XEXP (x, 1);
5076 break;
5078 default:
5079 break;
5081 return NULL;