1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
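/* As an illustration (assuming a 64-bit HOST_WIDE_INT): a low word whose
   top bit is set, such as 0x8000000000000000, is negative when viewed as a
   signed HOST_WIDE_INT, so HWI_SIGN_EXTEND yields (HOST_WIDE_INT) -1 for
   the high word; for low = 1 it yields 0, giving the pair (1, 0).  */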
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
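/* For example (illustrative), with 32-bit SImode the predicate is true for
   the constant whose masked value is 0x80000000 (only the sign bit set)
   and false for 0x40000000 or 0xc0000000.  */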
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
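/* For example (illustrative), simplify_gen_binary (PLUS, SImode,
   const1_rtx, reg) first tries constant folding; failing that, the
   commutative-operand swap below puts the constant second, so the result
   is (plus:SI reg (const_int 1)) rather than (plus:SI (const_int 1) reg).  */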
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
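/* As an illustration, a MEM whose address is a SYMBOL_REF into the
   constant pool holding a DFmode value is replaced by the corresponding
   CONST_DOUBLE, so that later folding can see the constant instead of
   the memory reference.  */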
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
212 rtx tem;
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
218 return gen_rtx_fmt_e (code, mode, op);
221 /* Likewise for ternary operations. */
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
227 rtx tem;
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
237 /* Likewise, for relational operations.
 238 CMP_MODE specifies the mode in which the comparison is done. */
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
244 rtx tem;
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
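/* Illustrative example: replacing (reg R) with (const_int 4) inside
   (plus:SI (reg R) (const_int 3)) rebuilds the PLUS through
   simplify_gen_binary, which folds it to (const_int 7).  */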
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
268 if (x == old_rtx)
269 return new_rtx;
271 switch (GET_RTX_CLASS (code))
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
325 break;
327 case RTX_OBJ:
328 if (code == MEM)
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
335 else if (code == LO_SUM)
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
348 else if (code == REG)
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
353 break;
355 default:
356 break;
358 return x;
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
368 rtx trueop, tem;
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
373 trueop = avoid_constant_pool_reference (op);
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
379 return simplify_unary_operation_1 (code, mode, op);
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 enum rtx_code reversed;
388 rtx temp;
390 switch (code)
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
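/* For example (illustrative), (not (and X Y)) becomes
   (ior (not X) (not Y)) and (not (ior X Y)) becomes
   (and (not X) (not Y)); if only one operand remains a NOT after
   simplification, the swap below moves it to the first position.  */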
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
498 break;
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
601 else if (STORE_FLAG_VALUE == -1)
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
612 break;
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
688 (float_truncate:DF (float_extend:XF foo:SF))
 689 = (float_extend:DF foo:SF). */
690 if ((GET_CODE (op) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations)
692 || GET_CODE (op) == FLOAT_EXTEND)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
694 0)))
695 > GET_MODE_SIZE (mode)
696 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
697 mode,
698 XEXP (op, 0), mode);
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
733 /* (float_extend (float_extend x)) is (float_extend x)
735 (float_extend (float x)) is (float x) assuming that double
 736 rounding can't happen. */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
749 break;
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
776 break;
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
786 case POPCOUNT:
787 switch (GET_CODE (op))
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 default:
804 break;
806 break;
808 case PARITY:
809 switch (GET_CODE (op))
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
826 default:
827 break;
829 break;
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE (XEXP (op, 0)) == mode)
863 return XEXP (op, 0);
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE (XEXP (op, 0)) == mode)
885 return XEXP (op, 0);
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
899 default:
900 break;
903 return 0;
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
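/* For example (illustrative), (neg:SI (const_int 5)) folds to
   (const_int -5) here, and a ZERO_EXTEND of (const_int -1) with
   OP_MODE QImode folds to (const_int 255).  */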
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
913 unsigned int width = GET_MODE_BITSIZE (mode);
915 if (code == VEC_DUPLICATE)
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
948 return gen_rtx_CONST_VECTOR (mode, v);
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
972 return gen_rtx_CONST_VECTOR (mode, v);
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1006 if (op_mode == VOIDmode)
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1010 if (hv < 0)
1011 return 0;
 1013 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
 1014 ;
 1015 else
1016 hv = 0, lv &= GET_MODE_MASK (op_mode);
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1029 switch (code)
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1035 case NEG:
1036 val = - arg0;
1037 break;
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
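/* Worked example (illustrative): for arg0 = 0b101000, arg0 & -arg0
   isolates the lowest set bit 0b1000, exact_log2 gives 3, and val
   becomes 4, i.e. the 1-based index of the least significant set bit.  */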
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
1050 case CLZ:
1051 arg0 &= GET_MODE_MASK (mode);
 1052 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
 1053 ;
 1054 else
1055 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1056 break;
1058 case CTZ:
1059 arg0 &= GET_MODE_MASK (mode);
1060 if (arg0 == 0)
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1065 val = GET_MODE_BITSIZE (mode);
1067 else
1068 val = exact_log2 (arg0 & -arg0);
1069 break;
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1086 case BSWAP:
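/* Reverse the byte order of arg0.  Illustrative example: with
   width == 32, arg0 == 0x12345678 produces val == 0x78563412.  */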
1088 unsigned int s;
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1099 break;
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
1134 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 1136 val
 1137 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1138 if (val
1139 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1140 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1142 else
1143 return 0;
1144 break;
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1155 default:
1156 gcc_unreachable ();
1159 return gen_int_mode (val, mode);
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || GET_CODE (op) == CONST_INT))
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1177 switch (code)
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1248 case BSWAP:
1250 unsigned int s;
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1270 break;
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1299 hv = HWI_SIGN_EXTEND (lv);
1301 break;
1303 case SQRT:
1304 return 0;
1306 default:
1307 return 0;
1310 return immed_double_const (lv, hv, mode);
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1319 switch (code)
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1344 long tmp[4];
1345 int i;
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1353 default:
1354 gcc_unreachable ();
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
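/* Illustrative behavior of the clamping below: a NaN operand folds to
   const0_rtx, and an out-of-range value such as 1e10 with a 32-bit MODE
   is clamped to the bound 0x7fffffff (or 0xffffffff for UNSIGNED_FIX)
   before the conversion to an integer constant.  */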
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1387 else
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1395 xh = th;
1396 xl = tl;
1397 break;
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1406 else
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1414 xh = th;
1415 xl = tl;
1416 break;
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1428 th = -1;
1429 tl = -1;
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1437 else
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1445 xh = th;
1446 xl = tl;
1447 break;
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1453 default:
1454 gcc_unreachable ();
1456 return immed_double_const (xl, xh, mode);
1459 return NULL_RTX;
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
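/* Illustrative example: for (plus (plus x (const_int 2)) (const_int 3))
   the "(a op b) op c" as "a op (b op c)" step below folds the two
   constants, giving (plus x (const_int 5)).  */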
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1472 rtx tem;
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0) == code)
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1493 if (GET_CODE (op0) == code)
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1513 return 0;
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1526 rtx trueop0, trueop1;
1527 rtx tem;
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1540 tem = op0, op0 = op1, op1 = tem;
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1568 switch (code)
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1597 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1598 && GET_CODE (op1) == CONST_INT)
1599 return plus_constant (op0, INTVAL (op1));
1600 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1601 && GET_CODE (op0) == CONST_INT)
1602 return plus_constant (op1, INTVAL (op0));
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
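/* Illustrative example: (plus (mult x (const_int 3)) x) has coefficients
   3 and 1; add_double sums them and the expression is rewritten as
   (mult x (const_int 4)), provided rtx_cost does not consider the new
   form more expensive than the original.  */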
1610 if (SCALAR_INT_MODE_P (mode))
1612 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1613 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1614 rtx lhs = op0, rhs = op1;
1616 if (GET_CODE (lhs) == NEG)
1618 coeff0l = -1;
1619 coeff0h = -1;
1620 lhs = XEXP (lhs, 0);
1622 else if (GET_CODE (lhs) == MULT
1623 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1625 coeff0l = INTVAL (XEXP (lhs, 1));
1626 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1627 lhs = XEXP (lhs, 0);
1629 else if (GET_CODE (lhs) == ASHIFT
1630 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs, 1)) >= 0
1632 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1634 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1635 coeff0h = 0;
1636 lhs = XEXP (lhs, 0);
1639 if (GET_CODE (rhs) == NEG)
1641 coeff1l = -1;
1642 coeff1h = -1;
1643 rhs = XEXP (rhs, 0);
1645 else if (GET_CODE (rhs) == MULT
1646 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1648 coeff1l = INTVAL (XEXP (rhs, 1));
1649 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1650 rhs = XEXP (rhs, 0);
1652 else if (GET_CODE (rhs) == ASHIFT
1653 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs, 1)) >= 0
1655 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1657 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1658 coeff1h = 0;
1659 rhs = XEXP (rhs, 0);
1662 if (rtx_equal_p (lhs, rhs))
1664 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1665 rtx coeff;
1666 unsigned HOST_WIDE_INT l;
1667 HOST_WIDE_INT h;
1669 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1670 coeff = immed_double_const (l, h, mode);
1672 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1673 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1674 ? tem : 0;
1678 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1679 if ((GET_CODE (op1) == CONST_INT
1680 || GET_CODE (op1) == CONST_DOUBLE)
1681 && GET_CODE (op0) == XOR
1682 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1683 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1684 && mode_signbit_p (mode, op1))
1685 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1686 simplify_gen_binary (XOR, mode, op1,
1687 XEXP (op0, 1)));
1689 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1690 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1691 && GET_CODE (op0) == MULT
1692 && GET_CODE (XEXP (op0, 0)) == NEG)
1694 rtx in1, in2;
1696 in1 = XEXP (XEXP (op0, 0), 0);
1697 in2 = XEXP (op0, 1);
1698 return simplify_gen_binary (MINUS, mode, op1,
1699 simplify_gen_binary (MULT, mode,
1700 in1, in2));
1703 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1704 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1705 is 1. */
1706 if (COMPARISON_P (op0)
1707 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1708 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1709 && (reversed = reversed_comparison (op0, mode)))
1710 return
1711 simplify_gen_unary (NEG, mode, reversed, mode);
1713 /* If one of the operands is a PLUS or a MINUS, see if we can
1714 simplify this by the associative law.
1715 Don't use the associative law for floating point.
1716 The inaccuracy makes it nonassociative,
1717 and subtle programs can break if operations are associated. */
1719 if (INTEGRAL_MODE_P (mode)
1720 && (plus_minus_operand_p (op0)
1721 || plus_minus_operand_p (op1))
1722 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1723 return tem;
1725 /* Reassociate floating point addition only when the user
1726 specifies associative math operations. */
1727 if (FLOAT_MODE_P (mode)
1728 && flag_associative_math)
1730 tem = simplify_associative_operation (code, mode, op0, op1);
1731 if (tem)
1732 return tem;
1734 break;
1736 case COMPARE:
1737 #ifdef HAVE_cc0
1738 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1739 using cc0, in which case we want to leave it as a COMPARE
1740 so we can distinguish it from a register-register-copy.
1742 In IEEE floating point, x-0 is not the same as x. */
1744 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1745 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1746 && trueop1 == CONST0_RTX (mode))
1747 return op0;
1748 #endif
1750 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1751 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1752 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1753 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1755 rtx xop00 = XEXP (op0, 0);
1756 rtx xop10 = XEXP (op1, 0);
1758 #ifdef HAVE_cc0
1759 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1760 #else
1761 if (REG_P (xop00) && REG_P (xop10)
1762 && GET_MODE (xop00) == GET_MODE (xop10)
1763 && REGNO (xop00) == REGNO (xop10)
1764 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1765 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1766 #endif
1767 return xop00;
1769 break;
1771 case MINUS:
1772 /* We can't assume x-x is 0 even with non-IEEE floating point,
1773 but since it is zero except in very strange circumstances, we
1774 will treat it as zero with -ffinite-math-only. */
1775 if (rtx_equal_p (trueop0, trueop1)
1776 && ! side_effects_p (op0)
1777 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1778 return CONST0_RTX (mode);
1780 /* Change subtraction from zero into negation. (0 - x) is the
1781 same as -x when x is NaN, infinite, or finite and nonzero.
1782 But if the mode has signed zeros, and does not round towards
1783 -infinity, then 0 - 0 is 0, not -0. */
1784 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1785 return simplify_gen_unary (NEG, mode, op1, mode);
1787 /* (-1 - a) is ~a. */
1788 if (trueop0 == constm1_rtx)
1789 return simplify_gen_unary (NOT, mode, op1, mode);
1791 /* Subtracting 0 has no effect unless the mode has signed zeros
1792 and supports rounding towards -infinity. In such a case,
1793 0 - 0 is -0. */
1794 if (!(HONOR_SIGNED_ZEROS (mode)
1795 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1796 && trueop1 == CONST0_RTX (mode))
1797 return op0;
1799 /* See if this is something like X * C - X or vice versa or
1800 if the multiplication is written as a shift. If so, we can
1801 distribute and make a new multiply, shift, or maybe just
1802 have X (if C is 2 in the example above). But don't make
1803 something more expensive than we had before. */
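/* Illustrative example: (minus x (ashift x (const_int 2))) gives
   coefficients 1 and -4 (the shift count becomes a negated power of
   two), so the expression is rewritten as (mult x (const_int -3))
   when that is no more expensive.  */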
1805 if (SCALAR_INT_MODE_P (mode))
1807 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1808 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1809 rtx lhs = op0, rhs = op1;
1811 if (GET_CODE (lhs) == NEG)
1813 coeff0l = -1;
1814 coeff0h = -1;
1815 lhs = XEXP (lhs, 0);
1817 else if (GET_CODE (lhs) == MULT
1818 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1820 coeff0l = INTVAL (XEXP (lhs, 1));
1821 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1822 lhs = XEXP (lhs, 0);
1824 else if (GET_CODE (lhs) == ASHIFT
1825 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1826 && INTVAL (XEXP (lhs, 1)) >= 0
1827 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1829 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1830 coeff0h = 0;
1831 lhs = XEXP (lhs, 0);
1834 if (GET_CODE (rhs) == NEG)
1836 negcoeff1l = 1;
1837 negcoeff1h = 0;
1838 rhs = XEXP (rhs, 0);
1840 else if (GET_CODE (rhs) == MULT
1841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1843 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1844 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1845 rhs = XEXP (rhs, 0);
1847 else if (GET_CODE (rhs) == ASHIFT
1848 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1849 && INTVAL (XEXP (rhs, 1)) >= 0
1850 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1852 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1853 negcoeff1h = -1;
1854 rhs = XEXP (rhs, 0);
1857 if (rtx_equal_p (lhs, rhs))
1859 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1860 rtx coeff;
1861 unsigned HOST_WIDE_INT l;
1862 HOST_WIDE_INT h;
1864 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1865 coeff = immed_double_const (l, h, mode);
1867 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1868 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1869 ? tem : 0;
1873 /* (a - (-b)) -> (a + b). True even for IEEE. */
1874 if (GET_CODE (op1) == NEG)
1875 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1877 /* (-x - c) may be simplified as (-c - x). */
1878 if (GET_CODE (op0) == NEG
1879 && (GET_CODE (op1) == CONST_INT
1880 || GET_CODE (op1) == CONST_DOUBLE))
1882 tem = simplify_unary_operation (NEG, mode, op1, mode);
1883 if (tem)
1884 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1887 /* Don't let a relocatable value get a negative coeff. */
1888 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1889 return simplify_gen_binary (PLUS, mode,
1890 op0,
1891 neg_const_int (mode, op1));
1893 /* (x - (x & y)) -> (x & ~y) */
1894 if (GET_CODE (op1) == AND)
1896 if (rtx_equal_p (op0, XEXP (op1, 0)))
1898 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1899 GET_MODE (XEXP (op1, 1)));
1900 return simplify_gen_binary (AND, mode, op0, tem);
1902 if (rtx_equal_p (op0, XEXP (op1, 1)))
1904 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1905 GET_MODE (XEXP (op1, 0)));
1906 return simplify_gen_binary (AND, mode, op0, tem);
1910 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1911 by reversing the comparison code if valid. */
1912 if (STORE_FLAG_VALUE == 1
1913 && trueop0 == const1_rtx
1914 && COMPARISON_P (op1)
1915 && (reversed = reversed_comparison (op1, mode)))
1916 return reversed;
1918 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1919 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1920 && GET_CODE (op1) == MULT
1921 && GET_CODE (XEXP (op1, 0)) == NEG)
1923 rtx in1, in2;
1925 in1 = XEXP (XEXP (op1, 0), 0);
1926 in2 = XEXP (op1, 1);
1927 return simplify_gen_binary (PLUS, mode,
1928 simplify_gen_binary (MULT, mode,
1929 in1, in2),
1930 op0);
1933 /* Canonicalize (minus (neg A) (mult B C)) to
1934 (minus (mult (neg B) C) A). */
1935 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1936 && GET_CODE (op1) == MULT
1937 && GET_CODE (op0) == NEG)
1939 rtx in1, in2;
1941 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1942 in2 = XEXP (op1, 1);
1943 return simplify_gen_binary (MINUS, mode,
1944 simplify_gen_binary (MULT, mode,
1945 in1, in2),
1946 XEXP (op0, 0));
1949 /* If one of the operands is a PLUS or a MINUS, see if we can
1950 simplify this by the associative law. This will, for example,
1951 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1952 Don't use the associative law for floating point.
1953 The inaccuracy makes it nonassociative,
1954 and subtle programs can break if operations are associated. */
1956 if (INTEGRAL_MODE_P (mode)
1957 && (plus_minus_operand_p (op0)
1958 || plus_minus_operand_p (op1))
1959 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1960 return tem;
1961 break;
1963 case MULT:
1964 if (trueop1 == constm1_rtx)
1965 return simplify_gen_unary (NEG, mode, op0, mode);
1967 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1968 x is NaN, since x * 0 is then also NaN. Nor is it valid
1969 when the mode has signed zeros, since multiplying a negative
1970 number by 0 will give -0, not 0. */
1971 if (!HONOR_NANS (mode)
1972 && !HONOR_SIGNED_ZEROS (mode)
1973 && trueop1 == CONST0_RTX (mode)
1974 && ! side_effects_p (op0))
1975 return op1;
1977 /* In IEEE floating point, x*1 is not equivalent to x for
1978 signalling NaNs. */
1979 if (!HONOR_SNANS (mode)
1980 && trueop1 == CONST1_RTX (mode))
1981 return op0;
1983 /* Convert multiply by constant power of two into shift unless
1984 we are still generating RTL. This test is a kludge. */
1985 if (GET_CODE (trueop1) == CONST_INT
1986 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1987 /* If the mode is larger than the host word size, and the
1988 uppermost bit is set, then this isn't a power of two due
1989 to implicit sign extension. */
1990 && (width <= HOST_BITS_PER_WIDE_INT
1991 || val != HOST_BITS_PER_WIDE_INT - 1))
1992 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1994 /* Likewise for multipliers wider than a word. */
1995 if (GET_CODE (trueop1) == CONST_DOUBLE
1996 && (GET_MODE (trueop1) == VOIDmode
1997 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1998 && GET_MODE (op0) == mode
1999 && CONST_DOUBLE_LOW (trueop1) == 0
2000 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2001 return simplify_gen_binary (ASHIFT, mode, op0,
2002 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2004 /* x*2 is x+x and x*(-1) is -x. */
2005 if (GET_CODE (trueop1) == CONST_DOUBLE
2006 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2007 && GET_MODE (op0) == mode)
2009 REAL_VALUE_TYPE d;
2010 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2012 if (REAL_VALUES_EQUAL (d, dconst2))
2013 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2015 if (!HONOR_SNANS (mode)
2016 && REAL_VALUES_EQUAL (d, dconstm1))
2017 return simplify_gen_unary (NEG, mode, op0, mode);
2020 /* Optimize -x * -x as x * x. */
2021 if (FLOAT_MODE_P (mode)
2022 && GET_CODE (op0) == NEG
2023 && GET_CODE (op1) == NEG
2024 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2025 && !side_effects_p (XEXP (op0, 0)))
2026 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2028 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2029 if (SCALAR_FLOAT_MODE_P (mode)
2030 && GET_CODE (op0) == ABS
2031 && GET_CODE (op1) == ABS
2032 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2033 && !side_effects_p (XEXP (op0, 0)))
2034 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2036 /* Reassociate multiplication, but for floating point MULTs
2037 only when the user specifies unsafe math optimizations. */
2038 if (! FLOAT_MODE_P (mode)
2039 || flag_unsafe_math_optimizations)
2041 tem = simplify_associative_operation (code, mode, op0, op1);
2042 if (tem)
2043 return tem;
2045 break;
2047 case IOR:
2048 if (trueop1 == const0_rtx)
2049 return op0;
2050 if (GET_CODE (trueop1) == CONST_INT
2051 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2052 == GET_MODE_MASK (mode)))
2053 return op1;
2054 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2055 return op0;
2056 /* A | (~A) -> -1 */
2057 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2058 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2059 && ! side_effects_p (op0)
2060 && SCALAR_INT_MODE_P (mode))
2061 return constm1_rtx;
2063 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2064 if (GET_CODE (op1) == CONST_INT
2065 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2066 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2067 return op1;
2069 /* Canonicalize (X & C1) | C2. */
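/* A concrete illustration, assuming an 8-bit mode: with C1 == 0x0f and
   C2 == 0xff we have (C1&C2) == C1, so (X&0x0f)|0xff is simply 0xff.
   With C1 == 0xf0 and C2 == 0x0f, (C1|C2) covers the whole mode mask,
   so (X&0xf0)|0x0f is X|0x0f.  Otherwise the bits of C1 that are also
   set in C2 are redundant, e.g. (X&0x3c)|0x0f becomes (X&0x30)|0x0f.  */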
2070 if (GET_CODE (op0) == AND
2071 && GET_CODE (trueop1) == CONST_INT
2072 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2074 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2075 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2076 HOST_WIDE_INT c2 = INTVAL (trueop1);
2078 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2079 if ((c1 & c2) == c1
2080 && !side_effects_p (XEXP (op0, 0)))
2081 return trueop1;
2083 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2084 if (((c1|c2) & mask) == mask)
2085 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2087 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2088 if (((c1 & ~c2) & mask) != (c1 & mask))
2090 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2091 gen_int_mode (c1 & ~c2, mode));
2092 return simplify_gen_binary (IOR, mode, tem, op1);
2096 /* Convert (A & B) | A to A. */
2097 if (GET_CODE (op0) == AND
2098 && (rtx_equal_p (XEXP (op0, 0), op1)
2099 || rtx_equal_p (XEXP (op0, 1), op1))
2100 && ! side_effects_p (XEXP (op0, 0))
2101 && ! side_effects_p (XEXP (op0, 1)))
2102 return op1;
2104 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2105 mode size to (rotate A CX). */
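/* For instance, assuming 32-bit SImode, (ior (ashift A 8) (lshiftrt A 24))
   shifts the low 24 bits of A up by 8 and brings the high 8 bits down to
   the bottom, which is exactly (rotate A 8).  */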
2107 if (GET_CODE (op1) == ASHIFT
2108 || GET_CODE (op1) == SUBREG)
2110 opleft = op1;
2111 opright = op0;
2113 else
2115 opright = op1;
2116 opleft = op0;
2119 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2120 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2121 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2122 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2123 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2124 == GET_MODE_BITSIZE (mode)))
2125 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2127 /* Same, but for ashift that has been "simplified" to a wider mode
2128 by simplify_shift_const. */
2130 if (GET_CODE (opleft) == SUBREG
2131 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2132 && GET_CODE (opright) == LSHIFTRT
2133 && GET_CODE (XEXP (opright, 0)) == SUBREG
2134 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2135 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2136 && (GET_MODE_SIZE (GET_MODE (opleft))
2137 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2138 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2139 SUBREG_REG (XEXP (opright, 0)))
2140 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2141 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2142 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2143 == GET_MODE_BITSIZE (mode)))
2144 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2145 XEXP (SUBREG_REG (opleft), 1));
2147 /* If we have (ior (and X C1) C2), simplify this by making
2148 C1 as small as possible if C1 actually changes. */
2149 if (GET_CODE (op1) == CONST_INT
2150 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2151 || INTVAL (op1) > 0)
2152 && GET_CODE (op0) == AND
2153 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2154 && GET_CODE (op1) == CONST_INT
2155 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2156 return simplify_gen_binary (IOR, mode,
2157 simplify_gen_binary
2158 (AND, mode, XEXP (op0, 0),
2159 GEN_INT (INTVAL (XEXP (op0, 1))
2160 & ~INTVAL (op1))),
2161 op1);
2163 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2164 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2165 the PLUS does not affect any of the bits in OP1: then we can do
2166 the IOR as a PLUS and we can associate. This is valid if OP1
2167 can be safely shifted left C bits. */
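/* As an illustration, assuming 32-bit SImode: with OP0 being
   (ashiftrt (plus A 0x1000) 8) and OP1 being 0xf, MASK below is 0xf00.
   If the nonzero bits of (plus A 0x1000) do not overlap 0xf00, IORing
   0xf into the shifted value is the same as adding 0xf00 before the
   shift, so the result is (ashiftrt (plus A 0x1f00) 8).  */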
2168 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2169 && GET_CODE (XEXP (op0, 0)) == PLUS
2170 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2171 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2172 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2174 int count = INTVAL (XEXP (op0, 1));
2175 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2177 if (mask >> count == INTVAL (trueop1)
2178 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2179 return simplify_gen_binary (ASHIFTRT, mode,
2180 plus_constant (XEXP (op0, 0), mask),
2181 XEXP (op0, 1));
2184 tem = simplify_associative_operation (code, mode, op0, op1);
2185 if (tem)
2186 return tem;
2187 break;
2189 case XOR:
2190 if (trueop1 == const0_rtx)
2191 return op0;
2192 if (GET_CODE (trueop1) == CONST_INT
2193 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2194 == GET_MODE_MASK (mode)))
2195 return simplify_gen_unary (NOT, mode, op0, mode);
2196 if (rtx_equal_p (trueop0, trueop1)
2197 && ! side_effects_p (op0)
2198 && GET_MODE_CLASS (mode) != MODE_CC)
2199 return CONST0_RTX (mode);
2201 /* Canonicalize XOR of the most significant bit to PLUS. */
2202 if ((GET_CODE (op1) == CONST_INT
2203 || GET_CODE (op1) == CONST_DOUBLE)
2204 && mode_signbit_p (mode, op1))
2205 return simplify_gen_binary (PLUS, mode, op0, op1);
2206 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && GET_CODE (op0) == PLUS
2210 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2211 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2212 && mode_signbit_p (mode, XEXP (op0, 1)))
2213 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2214 simplify_gen_binary (XOR, mode, op1,
2215 XEXP (op0, 1)));
2217 /* If we are XORing two things that have no bits in common,
2218 convert them into an IOR. This helps to detect rotation encoded
2219 using those methods and possibly other simplifications. */
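/* When no bit position can be nonzero in both operands there is nothing
   for XOR to cancel, so the result is bit-for-bit the same as IOR: e.g.
   if A is known to fit in the low byte and B only uses the high bytes,
   (xor A B) equals (ior A B).  */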
2221 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2222 && (nonzero_bits (op0, mode)
2223 & nonzero_bits (op1, mode)) == 0)
2224 return (simplify_gen_binary (IOR, mode, op0, op1));
2226 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2227 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2228 (NOT y). */
2230 int num_negated = 0;
2232 if (GET_CODE (op0) == NOT)
2233 num_negated++, op0 = XEXP (op0, 0);
2234 if (GET_CODE (op1) == NOT)
2235 num_negated++, op1 = XEXP (op1, 0);
2237 if (num_negated == 2)
2238 return simplify_gen_binary (XOR, mode, op0, op1);
2239 else if (num_negated == 1)
2240 return simplify_gen_unary (NOT, mode,
2241 simplify_gen_binary (XOR, mode, op0, op1),
2242 mode);
2245 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2246 correspond to a machine insn or result in further simplifications
2247 if B is a constant. */
2249 if (GET_CODE (op0) == AND
2250 && rtx_equal_p (XEXP (op0, 1), op1)
2251 && ! side_effects_p (op1))
2252 return simplify_gen_binary (AND, mode,
2253 simplify_gen_unary (NOT, mode,
2254 XEXP (op0, 0), mode),
2255 op1);
2257 else if (GET_CODE (op0) == AND
2258 && rtx_equal_p (XEXP (op0, 0), op1)
2259 && ! side_effects_p (op1))
2260 return simplify_gen_binary (AND, mode,
2261 simplify_gen_unary (NOT, mode,
2262 XEXP (op0, 1), mode),
2263 op1);
2265 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2266 comparison if STORE_FLAG_VALUE is 1. */
2267 if (STORE_FLAG_VALUE == 1
2268 && trueop1 == const1_rtx
2269 && COMPARISON_P (op0)
2270 && (reversed = reversed_comparison (op0, mode)))
2271 return reversed;
2273 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2274 is (lt foo (const_int 0)), so we can perform the above
2275 simplification if STORE_FLAG_VALUE is 1. */
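/* Concretely, assuming 32-bit SImode: (lshiftrt X 31) leaves only the
   sign bit, so it is 1 exactly when X is negative; XORing that with 1
   therefore tests X >= 0, which is the (ge X 0) generated below.  */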
2277 if (STORE_FLAG_VALUE == 1
2278 && trueop1 == const1_rtx
2279 && GET_CODE (op0) == LSHIFTRT
2280 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2281 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2282 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2284 /* (xor (comparison foo bar) (const_int sign-bit))
2285 when STORE_FLAG_VALUE is the sign bit. */
2286 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2287 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2288 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2289 && trueop1 == const_true_rtx
2290 && COMPARISON_P (op0)
2291 && (reversed = reversed_comparison (op0, mode)))
2292 return reversed;
2294 tem = simplify_associative_operation (code, mode, op0, op1);
2295 if (tem)
2296 return tem;
2297 break;
2299 case AND:
2300 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2301 return trueop1;
2302 /* If we are turning off bits already known off in OP0, we need
2303 not do an AND. */
2304 if (GET_CODE (trueop1) == CONST_INT
2305 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2307 return op0;
2308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2309 && GET_MODE_CLASS (mode) != MODE_CC)
2310 return op0;
2311 /* A & (~A) -> 0 */
2312 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2313 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2314 && ! side_effects_p (op0)
2315 && GET_MODE_CLASS (mode) != MODE_CC)
2316 return CONST0_RTX (mode);
2318 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2319 there are no nonzero bits of C outside of X's mode. */
2320 if ((GET_CODE (op0) == SIGN_EXTEND
2321 || GET_CODE (op0) == ZERO_EXTEND)
2322 && GET_CODE (trueop1) == CONST_INT
2323 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2324 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2325 & INTVAL (trueop1)) == 0)
2327 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2328 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2329 gen_int_mode (INTVAL (trueop1),
2330 imode));
2331 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2334 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2335 if (GET_CODE (op0) == IOR
2336 && GET_CODE (trueop1) == CONST_INT
2337 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2339 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2340 return simplify_gen_binary (IOR, mode,
2341 simplify_gen_binary (AND, mode,
2342 XEXP (op0, 0), op1),
2343 gen_int_mode (tmp, mode));
2346 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2347 insn (and may simplify more). */
2348 if (GET_CODE (op0) == XOR
2349 && rtx_equal_p (XEXP (op0, 0), op1)
2350 && ! side_effects_p (op1))
2351 return simplify_gen_binary (AND, mode,
2352 simplify_gen_unary (NOT, mode,
2353 XEXP (op0, 1), mode),
2354 op1);
2356 if (GET_CODE (op0) == XOR
2357 && rtx_equal_p (XEXP (op0, 1), op1)
2358 && ! side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode,
2360 simplify_gen_unary (NOT, mode,
2361 XEXP (op0, 0), mode),
2362 op1);
2364 /* Similarly for (~(A ^ B)) & A. */
2365 if (GET_CODE (op0) == NOT
2366 && GET_CODE (XEXP (op0, 0)) == XOR
2367 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2368 && ! side_effects_p (op1))
2369 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2371 if (GET_CODE (op0) == NOT
2372 && GET_CODE (XEXP (op0, 0)) == XOR
2373 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2374 && ! side_effects_p (op1))
2375 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2377 /* Convert (A | B) & A to A. */
2378 if (GET_CODE (op0) == IOR
2379 && (rtx_equal_p (XEXP (op0, 0), op1)
2380 || rtx_equal_p (XEXP (op0, 1), op1))
2381 && ! side_effects_p (XEXP (op0, 0))
2382 && ! side_effects_p (XEXP (op0, 1)))
2383 return op1;
2385 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2386 ((A & N) + B) & M -> (A + B) & M
2387 Similarly if (N & M) == 0,
2388 ((A | N) + B) & M -> (A + B) & M
2389 and for - instead of + and/or ^ instead of |. */
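/* A small worked example, assuming M == 0xff (so cst == 8): with
   N == 0xffff we have (N & M) == M, and ANDing A with N only clears
   bits above bit 7; since carries propagate only upwards, the low 8
   bits of the sum are unchanged and ((A & 0xffff) + B) & 0xff equals
   (A + B) & 0xff.  Likewise with N == 0x100, (N & M) == 0 and the IOR
   can only change bits above bit 7, so ((A | 0x100) + B) & 0xff also
   equals (A + B) & 0xff.  */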
2390 if (GET_CODE (trueop1) == CONST_INT
2391 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2392 && ~INTVAL (trueop1)
2393 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2394 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2396 rtx pmop[2];
2397 int which;
2399 pmop[0] = XEXP (op0, 0);
2400 pmop[1] = XEXP (op0, 1);
2402 for (which = 0; which < 2; which++)
2404 tem = pmop[which];
2405 switch (GET_CODE (tem))
2407 case AND:
2408 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2409 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2410 == INTVAL (trueop1))
2411 pmop[which] = XEXP (tem, 0);
2412 break;
2413 case IOR:
2414 case XOR:
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2417 pmop[which] = XEXP (tem, 0);
2418 break;
2419 default:
2420 break;
2424 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2426 tem = simplify_gen_binary (GET_CODE (op0), mode,
2427 pmop[0], pmop[1]);
2428 return simplify_gen_binary (code, mode, tem, op1);
2431 tem = simplify_associative_operation (code, mode, op0, op1);
2432 if (tem)
2433 return tem;
2434 break;
2436 case UDIV:
2437 /* 0/x is 0 (or x&0 if x has side-effects). */
2438 if (trueop0 == CONST0_RTX (mode))
2440 if (side_effects_p (op1))
2441 return simplify_gen_binary (AND, mode, op1, trueop0);
2442 return trueop0;
2444 /* x/1 is x. */
2445 if (trueop1 == CONST1_RTX (mode))
2446 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2447 /* Convert divide by power of two into shift. */
2448 if (GET_CODE (trueop1) == CONST_INT
2449 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2450 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2451 break;
2453 case DIV:
2454 /* Handle floating point and integers separately. */
2455 if (SCALAR_FLOAT_MODE_P (mode))
2457 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2458 safe for modes with NaNs, since 0.0 / 0.0 will then be
2459 NaN rather than 0.0. Nor is it safe for modes with signed
2460 zeros, since dividing 0 by a negative number gives -0.0. */
2461 if (trueop0 == CONST0_RTX (mode)
2462 && !HONOR_NANS (mode)
2463 && !HONOR_SIGNED_ZEROS (mode)
2464 && ! side_effects_p (op1))
2465 return op0;
2466 /* x/1.0 is x. */
2467 if (trueop1 == CONST1_RTX (mode)
2468 && !HONOR_SNANS (mode))
2469 return op0;
2471 if (GET_CODE (trueop1) == CONST_DOUBLE
2472 && trueop1 != CONST0_RTX (mode))
2474 REAL_VALUE_TYPE d;
2475 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2477 /* x/-1.0 is -x. */
2478 if (REAL_VALUES_EQUAL (d, dconstm1)
2479 && !HONOR_SNANS (mode))
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2482 /* Change FP division by a constant into multiplication.
2483 Only do this with -freciprocal-math. */
2484 if (flag_reciprocal_math
2485 && !REAL_VALUES_EQUAL (d, dconst0))
2487 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2488 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2489 return simplify_gen_binary (MULT, mode, op0, tem);
2493 else
2495 /* 0/x is 0 (or x&0 if x has side-effects). */
2496 if (trueop0 == CONST0_RTX (mode))
2498 if (side_effects_p (op1))
2499 return simplify_gen_binary (AND, mode, op1, trueop0);
2500 return trueop0;
2502 /* x/1 is x. */
2503 if (trueop1 == CONST1_RTX (mode))
2504 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2505 /* x/-1 is -x. */
2506 if (trueop1 == constm1_rtx)
2508 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2509 return simplify_gen_unary (NEG, mode, x, mode);
2512 break;
2514 case UMOD:
2515 /* 0%x is 0 (or x&0 if x has side-effects). */
2516 if (trueop0 == CONST0_RTX (mode))
2518 if (side_effects_p (op1))
2519 return simplify_gen_binary (AND, mode, op1, trueop0);
2520 return trueop0;
2522 /* x%1 is 0 (or x&0 if x has side-effects). */
2523 if (trueop1 == CONST1_RTX (mode))
2525 if (side_effects_p (op0))
2526 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2527 return CONST0_RTX (mode);
2529 /* Implement modulus by power of two as AND. */
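/* E.g. for unsigned X, X % 8 keeps just the low three bits, i.e. it is
   X & 7; in general the mask is the power of two minus one, which is
   what GEN_INT (INTVAL (op1) - 1) produces below.  */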
2530 if (GET_CODE (trueop1) == CONST_INT
2531 && exact_log2 (INTVAL (trueop1)) > 0)
2532 return simplify_gen_binary (AND, mode, op0,
2533 GEN_INT (INTVAL (op1) - 1));
2534 break;
2536 case MOD:
2537 /* 0%x is 0 (or x&0 if x has side-effects). */
2538 if (trueop0 == CONST0_RTX (mode))
2540 if (side_effects_p (op1))
2541 return simplify_gen_binary (AND, mode, op1, trueop0);
2542 return trueop0;
2544 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2545 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2547 if (side_effects_p (op0))
2548 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2549 return CONST0_RTX (mode);
2551 break;
2553 case ROTATERT:
2554 case ROTATE:
2555 case ASHIFTRT:
2556 if (trueop1 == CONST0_RTX (mode))
2557 return op0;
2558 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2559 return op0;
2560 /* Rotating ~0 always results in ~0. */
2561 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2562 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2563 && ! side_effects_p (op1))
2564 return op0;
2565 canonicalize_shift:
2566 if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
2568 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2569 if (val != INTVAL (op1))
2570 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2572 break;
2574 case ASHIFT:
2575 case SS_ASHIFT:
2576 case US_ASHIFT:
2577 if (trueop1 == CONST0_RTX (mode))
2578 return op0;
2579 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2580 return op0;
2581 goto canonicalize_shift;
2583 case LSHIFTRT:
2584 if (trueop1 == CONST0_RTX (mode))
2585 return op0;
2586 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2587 return op0;
2588 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
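/* The idea, assuming 32-bit SImode with CLZ defined to be 32 at zero:
   (clz X) ranges over 0..32, and 32 is the only value in that range
   with bit 5 set, so (lshiftrt (clz X) 5) is 1 exactly when X == 0.
   The checks below verify that the target defines CLZ that way.  */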
2589 if (GET_CODE (op0) == CLZ
2590 && GET_CODE (trueop1) == CONST_INT
2591 && STORE_FLAG_VALUE == 1
2592 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2594 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2595 unsigned HOST_WIDE_INT zero_val = 0;
2597 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2598 && zero_val == GET_MODE_BITSIZE (imode)
2599 && INTVAL (trueop1) == exact_log2 (zero_val))
2600 return simplify_gen_relational (EQ, mode, imode,
2601 XEXP (op0, 0), const0_rtx);
2603 goto canonicalize_shift;
2605 case SMIN:
2606 if (width <= HOST_BITS_PER_WIDE_INT
2607 && GET_CODE (trueop1) == CONST_INT
2608 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2609 && ! side_effects_p (op0))
2610 return op1;
2611 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2612 return op0;
2613 tem = simplify_associative_operation (code, mode, op0, op1);
2614 if (tem)
2615 return tem;
2616 break;
2618 case SMAX:
2619 if (width <= HOST_BITS_PER_WIDE_INT
2620 && GET_CODE (trueop1) == CONST_INT
2621 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2622 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2623 && ! side_effects_p (op0))
2624 return op1;
2625 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2626 return op0;
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 break;
2632 case UMIN:
2633 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2634 return op1;
2635 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2636 return op0;
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2638 if (tem)
2639 return tem;
2640 break;
2642 case UMAX:
2643 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2644 return op1;
2645 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2646 return op0;
2647 tem = simplify_associative_operation (code, mode, op0, op1);
2648 if (tem)
2649 return tem;
2650 break;
2652 case SS_PLUS:
2653 case US_PLUS:
2654 case SS_MINUS:
2655 case US_MINUS:
2656 case SS_MULT:
2657 case US_MULT:
2658 case SS_DIV:
2659 case US_DIV:
2660 /* ??? There are simplifications that can be done. */
2661 return 0;
2663 case VEC_SELECT:
2664 if (!VECTOR_MODE_P (mode))
2666 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2667 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2668 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2669 gcc_assert (XVECLEN (trueop1, 0) == 1);
2670 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2672 if (GET_CODE (trueop0) == CONST_VECTOR)
2673 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2674 (trueop1, 0, 0)));
2676 /* Extract a scalar element from a nested VEC_SELECT expression
2677 (with optional nested VEC_CONCAT expression). Some targets
2678 (i386) extract a scalar element from a vector using a chain of
2679 nested VEC_SELECT expressions. When the input operand is a memory
2680 operand, this operation can be simplified to a simple scalar
2681 load from an offset memory address. */
2682 if (GET_CODE (trueop0) == VEC_SELECT)
2684 rtx op0 = XEXP (trueop0, 0);
2685 rtx op1 = XEXP (trueop0, 1);
2687 enum machine_mode opmode = GET_MODE (op0);
2688 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2689 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2691 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2692 int elem;
2694 rtvec vec;
2695 rtx tmp_op, tmp;
2697 gcc_assert (GET_CODE (op1) == PARALLEL);
2698 gcc_assert (i < n_elts);
2701 /* Select the element pointed to by the nested selector. */
2701 elem = INTVAL (XVECEXP (op1, 0, i));
2703 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2704 if (GET_CODE (op0) == VEC_CONCAT)
2706 rtx op00 = XEXP (op0, 0);
2707 rtx op01 = XEXP (op0, 1);
2709 enum machine_mode mode00, mode01;
2710 int n_elts00, n_elts01;
2712 mode00 = GET_MODE (op00);
2713 mode01 = GET_MODE (op01);
2715 /* Find out number of elements of each operand. */
2716 if (VECTOR_MODE_P (mode00))
2718 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2719 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2721 else
2722 n_elts00 = 1;
2724 if (VECTOR_MODE_P (mode01))
2726 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2727 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2729 else
2730 n_elts01 = 1;
2732 gcc_assert (n_elts == n_elts00 + n_elts01);
2734 /* Select correct operand of VEC_CONCAT
2735 and adjust selector. */
2736 if (elem < n_elts01)
2737 tmp_op = op00;
2738 else
2740 tmp_op = op01;
2741 elem -= n_elts00;
2744 else
2745 tmp_op = op0;
2747 vec = rtvec_alloc (1);
2748 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2750 tmp = gen_rtx_fmt_ee (code, mode,
2751 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2752 return tmp;
2755 else
2757 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2758 gcc_assert (GET_MODE_INNER (mode)
2759 == GET_MODE_INNER (GET_MODE (trueop0)));
2760 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2762 if (GET_CODE (trueop0) == CONST_VECTOR)
2764 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2765 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2766 rtvec v = rtvec_alloc (n_elts);
2767 unsigned int i;
2769 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2770 for (i = 0; i < n_elts; i++)
2772 rtx x = XVECEXP (trueop1, 0, i);
2774 gcc_assert (GET_CODE (x) == CONST_INT);
2775 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2776 INTVAL (x));
2779 return gen_rtx_CONST_VECTOR (mode, v);
2783 if (XVECLEN (trueop1, 0) == 1
2784 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2785 && GET_CODE (trueop0) == VEC_CONCAT)
2787 rtx vec = trueop0;
2788 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2790 /* Try to find the element in the VEC_CONCAT. */
2791 while (GET_MODE (vec) != mode
2792 && GET_CODE (vec) == VEC_CONCAT)
2794 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2795 if (offset < vec_size)
2796 vec = XEXP (vec, 0);
2797 else
2799 offset -= vec_size;
2800 vec = XEXP (vec, 1);
2802 vec = avoid_constant_pool_reference (vec);
2805 if (GET_MODE (vec) == mode)
2806 return vec;
2809 return 0;
2810 case VEC_CONCAT:
2812 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2813 ? GET_MODE (trueop0)
2814 : GET_MODE_INNER (mode));
2815 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2816 ? GET_MODE (trueop1)
2817 : GET_MODE_INNER (mode));
2819 gcc_assert (VECTOR_MODE_P (mode));
2820 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2821 == GET_MODE_SIZE (mode));
2823 if (VECTOR_MODE_P (op0_mode))
2824 gcc_assert (GET_MODE_INNER (mode)
2825 == GET_MODE_INNER (op0_mode));
2826 else
2827 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2829 if (VECTOR_MODE_P (op1_mode))
2830 gcc_assert (GET_MODE_INNER (mode)
2831 == GET_MODE_INNER (op1_mode));
2832 else
2833 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2835 if ((GET_CODE (trueop0) == CONST_VECTOR
2836 || GET_CODE (trueop0) == CONST_INT
2837 || GET_CODE (trueop0) == CONST_DOUBLE)
2838 && (GET_CODE (trueop1) == CONST_VECTOR
2839 || GET_CODE (trueop1) == CONST_INT
2840 || GET_CODE (trueop1) == CONST_DOUBLE))
2842 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2843 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2844 rtvec v = rtvec_alloc (n_elts);
2845 unsigned int i;
2846 unsigned in_n_elts = 1;
2848 if (VECTOR_MODE_P (op0_mode))
2849 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2850 for (i = 0; i < n_elts; i++)
2852 if (i < in_n_elts)
2854 if (!VECTOR_MODE_P (op0_mode))
2855 RTVEC_ELT (v, i) = trueop0;
2856 else
2857 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2859 else
2861 if (!VECTOR_MODE_P (op1_mode))
2862 RTVEC_ELT (v, i) = trueop1;
2863 else
2864 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2865 i - in_n_elts);
2869 return gen_rtx_CONST_VECTOR (mode, v);
2872 return 0;
2874 default:
2875 gcc_unreachable ();
2878 return 0;
2882 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2883 rtx op0, rtx op1)
2885 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2886 HOST_WIDE_INT val;
2887 unsigned int width = GET_MODE_BITSIZE (mode);
2889 if (VECTOR_MODE_P (mode)
2890 && code != VEC_CONCAT
2891 && GET_CODE (op0) == CONST_VECTOR
2892 && GET_CODE (op1) == CONST_VECTOR)
2894 unsigned n_elts = GET_MODE_NUNITS (mode);
2895 enum machine_mode op0mode = GET_MODE (op0);
2896 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2897 enum machine_mode op1mode = GET_MODE (op1);
2898 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2899 rtvec v = rtvec_alloc (n_elts);
2900 unsigned int i;
2902 gcc_assert (op0_n_elts == n_elts);
2903 gcc_assert (op1_n_elts == n_elts);
2904 for (i = 0; i < n_elts; i++)
2906 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2907 CONST_VECTOR_ELT (op0, i),
2908 CONST_VECTOR_ELT (op1, i));
2909 if (!x)
2910 return 0;
2911 RTVEC_ELT (v, i) = x;
2914 return gen_rtx_CONST_VECTOR (mode, v);
2917 if (VECTOR_MODE_P (mode)
2918 && code == VEC_CONCAT
2919 && CONSTANT_P (op0) && CONSTANT_P (op1))
2921 unsigned n_elts = GET_MODE_NUNITS (mode);
2922 rtvec v = rtvec_alloc (n_elts);
2924 gcc_assert (n_elts >= 2);
2925 if (n_elts == 2)
2927 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2928 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2930 RTVEC_ELT (v, 0) = op0;
2931 RTVEC_ELT (v, 1) = op1;
2933 else
2935 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2936 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2937 unsigned i;
2939 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2940 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2941 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2943 for (i = 0; i < op0_n_elts; ++i)
2944 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2945 for (i = 0; i < op1_n_elts; ++i)
2946 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2949 return gen_rtx_CONST_VECTOR (mode, v);
2952 if (SCALAR_FLOAT_MODE_P (mode)
2953 && GET_CODE (op0) == CONST_DOUBLE
2954 && GET_CODE (op1) == CONST_DOUBLE
2955 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2957 if (code == AND
2958 || code == IOR
2959 || code == XOR)
2961 long tmp0[4];
2962 long tmp1[4];
2963 REAL_VALUE_TYPE r;
2964 int i;
2966 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2967 GET_MODE (op0));
2968 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2969 GET_MODE (op1));
2970 for (i = 0; i < 4; i++)
2972 switch (code)
2974 case AND:
2975 tmp0[i] &= tmp1[i];
2976 break;
2977 case IOR:
2978 tmp0[i] |= tmp1[i];
2979 break;
2980 case XOR:
2981 tmp0[i] ^= tmp1[i];
2982 break;
2983 default:
2984 gcc_unreachable ();
2987 real_from_target (&r, tmp0, mode);
2988 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2990 else
2992 REAL_VALUE_TYPE f0, f1, value, result;
2993 bool inexact;
2995 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2996 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2997 real_convert (&f0, mode, &f0);
2998 real_convert (&f1, mode, &f1);
3000 if (HONOR_SNANS (mode)
3001 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3002 return 0;
3004 if (code == DIV
3005 && REAL_VALUES_EQUAL (f1, dconst0)
3006 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3007 return 0;
3009 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3010 && flag_trapping_math
3011 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3013 int s0 = REAL_VALUE_NEGATIVE (f0);
3014 int s1 = REAL_VALUE_NEGATIVE (f1);
3016 switch (code)
3018 case PLUS:
3019 /* Inf + -Inf = NaN plus exception. */
3020 if (s0 != s1)
3021 return 0;
3022 break;
3023 case MINUS:
3024 /* Inf - Inf = NaN plus exception. */
3025 if (s0 == s1)
3026 return 0;
3027 break;
3028 case DIV:
3029 /* Inf / Inf = NaN plus exception. */
3030 return 0;
3031 default:
3032 break;
3036 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3037 && flag_trapping_math
3038 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3039 || (REAL_VALUE_ISINF (f1)
3040 && REAL_VALUES_EQUAL (f0, dconst0))))
3041 /* Inf * 0 = NaN plus exception. */
3042 return 0;
3044 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3045 &f0, &f1);
3046 real_convert (&result, mode, &value);
3048 /* Don't constant fold this floating point operation if
3049 the result has overflowed and flag_trapping_math is set. */
3051 if (flag_trapping_math
3052 && MODE_HAS_INFINITIES (mode)
3053 && REAL_VALUE_ISINF (result)
3054 && !REAL_VALUE_ISINF (f0)
3055 && !REAL_VALUE_ISINF (f1))
3056 /* Overflow plus exception. */
3057 return 0;
3059 /* Don't constant fold this floating point operation if the
3060 result may depend upon the run-time rounding mode and
3061 flag_rounding_math is set, or if GCC's software emulation
3062 is unable to accurately represent the result. */
3064 if ((flag_rounding_math
3065 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
3066 && !flag_unsafe_math_optimizations))
3067 && (inexact || !real_identical (&result, &value)))
3068 return NULL_RTX;
3070 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3074 /* We can fold some multi-word operations. */
3075 if (GET_MODE_CLASS (mode) == MODE_INT
3076 && width == HOST_BITS_PER_WIDE_INT * 2
3077 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3078 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3080 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3081 HOST_WIDE_INT h1, h2, hv, ht;
3083 if (GET_CODE (op0) == CONST_DOUBLE)
3084 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3085 else
3086 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3088 if (GET_CODE (op1) == CONST_DOUBLE)
3089 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3090 else
3091 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3093 switch (code)
3095 case MINUS:
3096 /* A - B == A + (-B). */
3097 neg_double (l2, h2, &lv, &hv);
3098 l2 = lv, h2 = hv;
3100 /* Fall through.... */
3102 case PLUS:
3103 add_double (l1, h1, l2, h2, &lv, &hv);
3104 break;
3106 case MULT:
3107 mul_double (l1, h1, l2, h2, &lv, &hv);
3108 break;
3110 case DIV:
3111 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3112 &lv, &hv, &lt, &ht))
3113 return 0;
3114 break;
3116 case MOD:
3117 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3118 &lt, &ht, &lv, &hv))
3119 return 0;
3120 break;
3122 case UDIV:
3123 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3124 &lv, &hv, &lt, &ht))
3125 return 0;
3126 break;
3128 case UMOD:
3129 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3130 &lt, &ht, &lv, &hv))
3131 return 0;
3132 break;
3134 case AND:
3135 lv = l1 & l2, hv = h1 & h2;
3136 break;
3138 case IOR:
3139 lv = l1 | l2, hv = h1 | h2;
3140 break;
3142 case XOR:
3143 lv = l1 ^ l2, hv = h1 ^ h2;
3144 break;
3146 case SMIN:
3147 if (h1 < h2
3148 || (h1 == h2
3149 && ((unsigned HOST_WIDE_INT) l1
3150 < (unsigned HOST_WIDE_INT) l2)))
3151 lv = l1, hv = h1;
3152 else
3153 lv = l2, hv = h2;
3154 break;
3156 case SMAX:
3157 if (h1 > h2
3158 || (h1 == h2
3159 && ((unsigned HOST_WIDE_INT) l1
3160 > (unsigned HOST_WIDE_INT) l2)))
3161 lv = l1, hv = h1;
3162 else
3163 lv = l2, hv = h2;
3164 break;
3166 case UMIN:
3167 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3168 || (h1 == h2
3169 && ((unsigned HOST_WIDE_INT) l1
3170 < (unsigned HOST_WIDE_INT) l2)))
3171 lv = l1, hv = h1;
3172 else
3173 lv = l2, hv = h2;
3174 break;
3176 case UMAX:
3177 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3178 || (h1 == h2
3179 && ((unsigned HOST_WIDE_INT) l1
3180 > (unsigned HOST_WIDE_INT) l2)))
3181 lv = l1, hv = h1;
3182 else
3183 lv = l2, hv = h2;
3184 break;
3186 case LSHIFTRT: case ASHIFTRT:
3187 case ASHIFT:
3188 case ROTATE: case ROTATERT:
3189 if (SHIFT_COUNT_TRUNCATED)
3190 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3192 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3193 return 0;
3195 if (code == LSHIFTRT || code == ASHIFTRT)
3196 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3197 code == ASHIFTRT);
3198 else if (code == ASHIFT)
3199 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3200 else if (code == ROTATE)
3201 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3202 else /* code == ROTATERT */
3203 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3204 break;
3206 default:
3207 return 0;
3210 return immed_double_const (lv, hv, mode);
3213 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3214 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3216 /* Get the integer argument values in two forms:
3217 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
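/* For example, with an 8-bit mode and an operand of 0xff: ARG0 holds
   the zero-extended value 255, while ARG0S has the bits above bit 7
   filled with ones and therefore holds -1, the sign-extended value.  */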
3219 arg0 = INTVAL (op0);
3220 arg1 = INTVAL (op1);
3222 if (width < HOST_BITS_PER_WIDE_INT)
3224 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3225 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3227 arg0s = arg0;
3228 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3229 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3231 arg1s = arg1;
3232 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3233 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3235 else
3237 arg0s = arg0;
3238 arg1s = arg1;
3241 /* Compute the value of the arithmetic. */
3243 switch (code)
3245 case PLUS:
3246 val = arg0s + arg1s;
3247 break;
3249 case MINUS:
3250 val = arg0s - arg1s;
3251 break;
3253 case MULT:
3254 val = arg0s * arg1s;
3255 break;
3257 case DIV:
3258 if (arg1s == 0
3259 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3260 && arg1s == -1))
3261 return 0;
3262 val = arg0s / arg1s;
3263 break;
3265 case MOD:
3266 if (arg1s == 0
3267 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3268 && arg1s == -1))
3269 return 0;
3270 val = arg0s % arg1s;
3271 break;
3273 case UDIV:
3274 if (arg1 == 0
3275 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3276 && arg1s == -1))
3277 return 0;
3278 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3279 break;
3281 case UMOD:
3282 if (arg1 == 0
3283 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3284 && arg1s == -1))
3285 return 0;
3286 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3287 break;
3289 case AND:
3290 val = arg0 & arg1;
3291 break;
3293 case IOR:
3294 val = arg0 | arg1;
3295 break;
3297 case XOR:
3298 val = arg0 ^ arg1;
3299 break;
3301 case LSHIFTRT:
3302 case ASHIFT:
3303 case ASHIFTRT:
3304 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3305 the value is in range. We can't return any old value for
3306 out-of-range arguments because either the middle-end (via
3307 shift_truncation_mask) or the back-end might be relying on
3308 target-specific knowledge. Nor can we rely on
3309 shift_truncation_mask, since the shift might not be part of an
3310 ashlM3, lshrM3 or ashrM3 instruction. */
3311 if (SHIFT_COUNT_TRUNCATED)
3312 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3313 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3314 return 0;
3316 val = (code == ASHIFT
3317 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3318 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3320 /* Sign-extend the result for arithmetic right shifts. */
3321 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3322 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3323 break;
3325 case ROTATERT:
3326 if (arg1 < 0)
3327 return 0;
3329 arg1 %= width;
3330 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3331 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3332 break;
3334 case ROTATE:
3335 if (arg1 < 0)
3336 return 0;
3338 arg1 %= width;
3339 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3340 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3341 break;
3343 case COMPARE:
3344 /* Do nothing here. */
3345 return 0;
3347 case SMIN:
3348 val = arg0s <= arg1s ? arg0s : arg1s;
3349 break;
3351 case UMIN:
3352 val = ((unsigned HOST_WIDE_INT) arg0
3353 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3354 break;
3356 case SMAX:
3357 val = arg0s > arg1s ? arg0s : arg1s;
3358 break;
3360 case UMAX:
3361 val = ((unsigned HOST_WIDE_INT) arg0
3362 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3363 break;
3365 case SS_PLUS:
3366 case US_PLUS:
3367 case SS_MINUS:
3368 case US_MINUS:
3369 case SS_MULT:
3370 case US_MULT:
3371 case SS_DIV:
3372 case US_DIV:
3373 case SS_ASHIFT:
3374 case US_ASHIFT:
3375 /* ??? There are simplifications that can be done. */
3376 return 0;
3378 default:
3379 gcc_unreachable ();
3382 return gen_int_mode (val, mode);
3385 return NULL_RTX;
3390 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3391 PLUS or MINUS.
3393 Rather than test for specific cases, we do this by a brute-force method
3394 and do all possible simplifications until no more changes occur. Then
3395 we rebuild the operation. */
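/* For instance, (minus (plus A B) (minus A C)) is first flattened into
   the operand list A, B, -A, C; the pairwise pass then cancels A
   against -A, and the remaining operands are rebuilt as (plus B C).  */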
3397 struct simplify_plus_minus_op_data
3399 rtx op;
3400 short neg;
3403 static bool
3404 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3406 int result;
3408 result = (commutative_operand_precedence (y)
3409 - commutative_operand_precedence (x));
3410 if (result)
3411 return result > 0;
3413 /* Group together equal REGs to do more simplification. */
3414 if (REG_P (x) && REG_P (y))
3415 return REGNO (x) > REGNO (y);
3416 else
3417 return false;
3420 static rtx
3421 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3422 rtx op1)
3424 struct simplify_plus_minus_op_data ops[8];
3425 rtx result, tem;
3426 int n_ops = 2, input_ops = 2;
3427 int changed, n_constants = 0, canonicalized = 0;
3428 int i, j;
3430 memset (ops, 0, sizeof ops);
3432 /* Set up the two operands and then expand them until nothing has been
3433 changed. If we run out of room in our array, give up; this should
3434 almost never happen. */
3436 ops[0].op = op0;
3437 ops[0].neg = 0;
3438 ops[1].op = op1;
3439 ops[1].neg = (code == MINUS);
3443 changed = 0;
3445 for (i = 0; i < n_ops; i++)
3447 rtx this_op = ops[i].op;
3448 int this_neg = ops[i].neg;
3449 enum rtx_code this_code = GET_CODE (this_op);
3451 switch (this_code)
3453 case PLUS:
3454 case MINUS:
3455 if (n_ops == 7)
3456 return NULL_RTX;
3458 ops[n_ops].op = XEXP (this_op, 1);
3459 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3460 n_ops++;
3462 ops[i].op = XEXP (this_op, 0);
3463 input_ops++;
3464 changed = 1;
3465 canonicalized |= this_neg;
3466 break;
3468 case NEG:
3469 ops[i].op = XEXP (this_op, 0);
3470 ops[i].neg = ! this_neg;
3471 changed = 1;
3472 canonicalized = 1;
3473 break;
3475 case CONST:
3476 if (n_ops < 7
3477 && GET_CODE (XEXP (this_op, 0)) == PLUS
3478 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3479 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3481 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3482 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3483 ops[n_ops].neg = this_neg;
3484 n_ops++;
3485 changed = 1;
3486 canonicalized = 1;
3488 break;
3490 case NOT:
3491 /* ~a -> (-a - 1) */
3492 if (n_ops != 7)
3494 ops[n_ops].op = constm1_rtx;
3495 ops[n_ops++].neg = this_neg;
3496 ops[i].op = XEXP (this_op, 0);
3497 ops[i].neg = !this_neg;
3498 changed = 1;
3499 canonicalized = 1;
3501 break;
3503 case CONST_INT:
3504 n_constants++;
3505 if (this_neg)
3507 ops[i].op = neg_const_int (mode, this_op);
3508 ops[i].neg = 0;
3509 changed = 1;
3510 canonicalized = 1;
3512 break;
3514 default:
3515 break;
3519 while (changed);
3521 if (n_constants > 1)
3522 canonicalized = 1;
3524 gcc_assert (n_ops >= 2);
3526 /* If we only have two operands, we can avoid the loops. */
3527 if (n_ops == 2)
3529 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3530 rtx lhs, rhs;
3532 /* Get the two operands. Be careful with the order, especially for
3533 the cases where code == MINUS. */
3534 if (ops[0].neg && ops[1].neg)
3536 lhs = gen_rtx_NEG (mode, ops[0].op);
3537 rhs = ops[1].op;
3539 else if (ops[0].neg)
3541 lhs = ops[1].op;
3542 rhs = ops[0].op;
3544 else
3546 lhs = ops[0].op;
3547 rhs = ops[1].op;
3550 return simplify_const_binary_operation (code, mode, lhs, rhs);
3553 /* Now simplify each pair of operands until nothing changes. */
3556 /* Insertion sort is good enough for an eight-element array. */
3557 for (i = 1; i < n_ops; i++)
3559 struct simplify_plus_minus_op_data save;
3560 j = i - 1;
3561 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3562 continue;
3564 canonicalized = 1;
3565 save = ops[i];
3567 ops[j + 1] = ops[j];
3568 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3569 ops[j + 1] = save;
3572 /* This is only useful the first time through. */
3573 if (!canonicalized)
3574 return NULL_RTX;
3576 changed = 0;
3577 for (i = n_ops - 1; i > 0; i--)
3578 for (j = i - 1; j >= 0; j--)
3580 rtx lhs = ops[j].op, rhs = ops[i].op;
3581 int lneg = ops[j].neg, rneg = ops[i].neg;
3583 if (lhs != 0 && rhs != 0)
3585 enum rtx_code ncode = PLUS;
3587 if (lneg != rneg)
3589 ncode = MINUS;
3590 if (lneg)
3591 tem = lhs, lhs = rhs, rhs = tem;
3593 else if (swap_commutative_operands_p (lhs, rhs))
3594 tem = lhs, lhs = rhs, rhs = tem;
3596 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3597 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3599 rtx tem_lhs, tem_rhs;
3601 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3602 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3603 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3605 if (tem && !CONSTANT_P (tem))
3606 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3608 else
3609 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3611 /* Reject "simplifications" that just wrap the two
3612 arguments in a CONST. Failure to do so can result
3613 in infinite recursion with simplify_binary_operation
3614 when it calls us to simplify CONST operations. */
3615 if (tem
3616 && ! (GET_CODE (tem) == CONST
3617 && GET_CODE (XEXP (tem, 0)) == ncode
3618 && XEXP (XEXP (tem, 0), 0) == lhs
3619 && XEXP (XEXP (tem, 0), 1) == rhs))
3621 lneg &= rneg;
3622 if (GET_CODE (tem) == NEG)
3623 tem = XEXP (tem, 0), lneg = !lneg;
3624 if (GET_CODE (tem) == CONST_INT && lneg)
3625 tem = neg_const_int (mode, tem), lneg = 0;
3627 ops[i].op = tem;
3628 ops[i].neg = lneg;
3629 ops[j].op = NULL_RTX;
3630 changed = 1;
3635 /* Pack all the operands to the lower-numbered entries. */
3636 for (i = 0, j = 0; j < n_ops; j++)
3637 if (ops[j].op)
3639 ops[i] = ops[j];
3640 i++;
3642 n_ops = i;
3644 while (changed);
3646 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3647 if (n_ops == 2
3648 && GET_CODE (ops[1].op) == CONST_INT
3649 && CONSTANT_P (ops[0].op)
3650 && ops[0].neg)
3651 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3653 /* We suppressed creation of trivial CONST expressions in the
3654 combination loop to avoid recursion. Create one manually now.
3655 The combination loop should have ensured that there is exactly
3656 one CONST_INT, and the sort will have ensured that it is last
3657 in the array and that any other constant will be next-to-last. */
3659 if (n_ops > 1
3660 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3661 && CONSTANT_P (ops[n_ops - 2].op))
3663 rtx value = ops[n_ops - 1].op;
3664 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3665 value = neg_const_int (mode, value);
3666 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3667 n_ops--;
3670 /* Put a non-negated operand first, if possible. */
3672 for (i = 0; i < n_ops && ops[i].neg; i++)
3673 continue;
3674 if (i == n_ops)
3675 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3676 else if (i != 0)
3678 tem = ops[0].op;
3679 ops[0] = ops[i];
3680 ops[i].op = tem;
3681 ops[i].neg = 1;
3684 /* Now make the result by performing the requested operations. */
3685 result = ops[0].op;
3686 for (i = 1; i < n_ops; i++)
3687 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3688 mode, result, ops[i].op);
3690 return result;
3693 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3694 static bool
3695 plus_minus_operand_p (const_rtx x)
3697 return GET_CODE (x) == PLUS
3698 || GET_CODE (x) == MINUS
3699 || (GET_CODE (x) == CONST
3700 && GET_CODE (XEXP (x, 0)) == PLUS
3701 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3702 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3705 /* Like simplify_binary_operation except used for relational operators.
3706 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3707 not both be VOIDmode as well.
3709 CMP_MODE specifies the mode in which the comparison is done, so it is
3710 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3711 the operands or, if both are VOIDmode, the operands are compared in
3712 "infinite precision". */
3714 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3715 enum machine_mode cmp_mode, rtx op0, rtx op1)
3717 rtx tem, trueop0, trueop1;
3719 if (cmp_mode == VOIDmode)
3720 cmp_mode = GET_MODE (op0);
3721 if (cmp_mode == VOIDmode)
3722 cmp_mode = GET_MODE (op1);
3724 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3725 if (tem)
3727 if (SCALAR_FLOAT_MODE_P (mode))
3729 if (tem == const0_rtx)
3730 return CONST0_RTX (mode);
3731 #ifdef FLOAT_STORE_FLAG_VALUE
3733 REAL_VALUE_TYPE val;
3734 val = FLOAT_STORE_FLAG_VALUE (mode);
3735 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3737 #else
3738 return NULL_RTX;
3739 #endif
3741 if (VECTOR_MODE_P (mode))
3743 if (tem == const0_rtx)
3744 return CONST0_RTX (mode);
3745 #ifdef VECTOR_STORE_FLAG_VALUE
3747 int i, units;
3748 rtvec v;
3750 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3751 if (val == NULL_RTX)
3752 return NULL_RTX;
3753 if (val == const1_rtx)
3754 return CONST1_RTX (mode);
3756 units = GET_MODE_NUNITS (mode);
3757 v = rtvec_alloc (units);
3758 for (i = 0; i < units; i++)
3759 RTVEC_ELT (v, i) = val;
3760 return gen_rtx_raw_CONST_VECTOR (mode, v);
3762 #else
3763 return NULL_RTX;
3764 #endif
3767 return tem;
3770 /* For the following tests, ensure const0_rtx is op1. */
3771 if (swap_commutative_operands_p (op0, op1)
3772 || (op0 == const0_rtx && op1 != const0_rtx))
3773 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3775 /* If op0 is a compare, extract the comparison arguments from it. */
3776 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3777 return simplify_relational_operation (code, mode, VOIDmode,
3778 XEXP (op0, 0), XEXP (op0, 1));
3780 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3781 || CC0_P (op0))
3782 return NULL_RTX;
3784 trueop0 = avoid_constant_pool_reference (op0);
3785 trueop1 = avoid_constant_pool_reference (op1);
3786 return simplify_relational_operation_1 (code, mode, cmp_mode,
3787 trueop0, trueop1);
3790 /* This part of simplify_relational_operation is only used when CMP_MODE
3791 is not in class MODE_CC (i.e. it is a real comparison).
3793 MODE is the mode of the result, while CMP_MODE specifies the mode
3794 in which the comparison is done, so it is the mode of the operands. */
3796 static rtx
3797 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3798 enum machine_mode cmp_mode, rtx op0, rtx op1)
3800 enum rtx_code op0code = GET_CODE (op0);
3802 if (op1 == const0_rtx && COMPARISON_P (op0))
3804 /* If op0 is a comparison, extract the comparison arguments
3805 from it. */
3806 if (code == NE)
3808 if (GET_MODE (op0) == mode)
3809 return simplify_rtx (op0);
3810 else
3811 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3812 XEXP (op0, 0), XEXP (op0, 1));
3814 else if (code == EQ)
3816 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3817 if (new_code != UNKNOWN)
3818 return simplify_gen_relational (new_code, mode, VOIDmode,
3819 XEXP (op0, 0), XEXP (op0, 1));
3823 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
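/* Both forms test whether the unsigned addition wrapped around:
   (plus a b) is unsigned-less than either operand exactly when it
   overflows, so comparing against A instead of B is equivalent and
   gives a single canonical shape.  */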
3824 if ((code == LTU || code == GEU)
3825 && GET_CODE (op0) == PLUS
3826 && rtx_equal_p (op1, XEXP (op0, 1))
3827 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3828 && !rtx_equal_p (op1, XEXP (op0, 0)))
3829 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3831 if (op1 == const0_rtx)
3833 /* Canonicalize (GTU x 0) as (NE x 0). */
3834 if (code == GTU)
3835 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3836 /* Canonicalize (LEU x 0) as (EQ x 0). */
3837 if (code == LEU)
3838 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3840 else if (op1 == const1_rtx)
3842 switch (code)
3844 case GE:
3845 /* Canonicalize (GE x 1) as (GT x 0). */
3846 return simplify_gen_relational (GT, mode, cmp_mode,
3847 op0, const0_rtx);
3848 case GEU:
3849 /* Canonicalize (GEU x 1) as (NE x 0). */
3850 return simplify_gen_relational (NE, mode, cmp_mode,
3851 op0, const0_rtx);
3852 case LT:
3853 /* Canonicalize (LT x 1) as (LE x 0). */
3854 return simplify_gen_relational (LE, mode, cmp_mode,
3855 op0, const0_rtx);
3856 case LTU:
3857 /* Canonicalize (LTU x 1) as (EQ x 0). */
3858 return simplify_gen_relational (EQ, mode, cmp_mode,
3859 op0, const0_rtx);
3860 default:
3861 break;
3864 else if (op1 == constm1_rtx)
3866 /* Canonicalize (LE x -1) as (LT x 0). */
3867 if (code == LE)
3868 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3869 /* Canonicalize (GT x -1) as (GE x 0). */
3870 if (code == GT)
3871 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3874 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
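/* E.g. (eq (plus x 3) 10) becomes (eq x 7); for a MINUS the constant
   is added back instead.  For floating point this is only attempted
   with -funsafe-math-optimizations, as the condition below requires.  */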
3875 if ((code == EQ || code == NE)
3876 && (op0code == PLUS || op0code == MINUS)
3877 && CONSTANT_P (op1)
3878 && CONSTANT_P (XEXP (op0, 1))
3879 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3881 rtx x = XEXP (op0, 0);
3882 rtx c = XEXP (op0, 1);
3884 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3885 cmp_mode, op1, c);
3886 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3889 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3890 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3891 if (code == NE
3892 && op1 == const0_rtx
3893 && GET_MODE_CLASS (mode) == MODE_INT
3894 && cmp_mode != VOIDmode
3895 /* ??? Work-around BImode bugs in the ia64 backend. */
3896 && mode != BImode
3897 && cmp_mode != BImode
3898 && nonzero_bits (op0, cmp_mode) == 1
3899 && STORE_FLAG_VALUE == 1)
3900 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3901 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3902 : lowpart_subreg (mode, op0, cmp_mode);
3904 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3905 if ((code == EQ || code == NE)
3906 && op1 == const0_rtx
3907 && op0code == XOR)
3908 return simplify_gen_relational (code, mode, cmp_mode,
3909 XEXP (op0, 0), XEXP (op0, 1));
3911 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3912 if ((code == EQ || code == NE)
3913 && op0code == XOR
3914 && rtx_equal_p (XEXP (op0, 0), op1)
3915 && !side_effects_p (XEXP (op0, 0)))
3916 return simplify_gen_relational (code, mode, cmp_mode,
3917 XEXP (op0, 1), const0_rtx);
3919 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3920 if ((code == EQ || code == NE)
3921 && op0code == XOR
3922 && rtx_equal_p (XEXP (op0, 1), op1)
3923 && !side_effects_p (XEXP (op0, 1)))
3924 return simplify_gen_relational (code, mode, cmp_mode,
3925 XEXP (op0, 0), const0_rtx);
3927 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3928 if ((code == EQ || code == NE)
3929 && op0code == XOR
3930 && (GET_CODE (op1) == CONST_INT
3931 || GET_CODE (op1) == CONST_DOUBLE)
3932 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3933 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3934 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3935 simplify_gen_binary (XOR, cmp_mode,
3936 XEXP (op0, 1), op1));
3938 if (op0code == POPCOUNT && op1 == const0_rtx)
3939 switch (code)
3941 case EQ:
3942 case LE:
3943 case LEU:
3944 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3945 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3946 XEXP (op0, 0), const0_rtx);
3948 case NE:
3949 case GT:
3950 case GTU:
3951 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3952 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3953 XEXP (op0, 0), const0_rtx);
3955 default:
3956 break;
3959 return NULL_RTX;
3962 enum
3964 CMP_EQ = 1,
3965 CMP_LT = 2,
3966 CMP_GT = 4,
3967 CMP_LTU = 8,
3968 CMP_GTU = 16
3972 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
3973 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
3974 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
3975 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
3976 For floating-point comparisons, assume that the operands were ordered. */
3978 static rtx
3979 comparison_result (enum rtx_code code, int known_results)
3981 switch (code)
3983 case EQ:
3984 case UNEQ:
3985 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
3986 case NE:
3987 case LTGT:
3988 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
3990 case LT:
3991 case UNLT:
3992 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
3993 case GE:
3994 case UNGE:
3995 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
3997 case GT:
3998 case UNGT:
3999 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4000 case LE:
4001 case UNLE:
4002 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4004 case LTU:
4005 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4006 case GEU:
4007 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4009 case GTU:
4010 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4011 case LEU:
4012 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4014 case ORDERED:
4015 return const_true_rtx;
4016 case UNORDERED:
4017 return const0_rtx;
4018 default:
4019 gcc_unreachable ();
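/* Illustrative sketch (not part of the original sources; the helper name
   is hypothetical): how a caller with two fully known single-word values
   can build the KNOWN_RESULTS mask that comparison_result expects.  */
#if 0
static rtx
example_fold_known_compare (enum rtx_code code,
                            HOST_WIDE_INT s0, HOST_WIDE_INT s1,
                            unsigned HOST_WIDE_INT u0,
                            unsigned HOST_WIDE_INT u1)
{
  int known;

  if (s0 == s1)
    known = CMP_EQ;
  else
    known = (s0 < s1 ? CMP_LT : CMP_GT) | (u0 < u1 ? CMP_LTU : CMP_GTU);

  /* E.g. for s0 == -1, s1 == 0 (so u0 == ~0, u1 == 0) this yields
     CMP_LT | CMP_GTU, and both (LT x y) and (GTU x y) fold to
     const_true_rtx while (GE x y) folds to const0_rtx.  */
  return comparison_result (code, known);
}
#endif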
4023 /* Check if the given comparison (done in the given MODE) is actually a
4024 tautology or a contradiction.
4025 If no simplification is possible, this function returns zero.
4026 Otherwise, it returns either const_true_rtx or const0_rtx. */
4029 simplify_const_relational_operation (enum rtx_code code,
4030 enum machine_mode mode,
4031 rtx op0, rtx op1)
4033 rtx tem;
4034 rtx trueop0;
4035 rtx trueop1;
4037 gcc_assert (mode != VOIDmode
4038 || (GET_MODE (op0) == VOIDmode
4039 && GET_MODE (op1) == VOIDmode));
4041 /* If op0 is a compare, extract the comparison arguments from it. */
4042 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4044 op1 = XEXP (op0, 1);
4045 op0 = XEXP (op0, 0);
4047 if (GET_MODE (op0) != VOIDmode)
4048 mode = GET_MODE (op0);
4049 else if (GET_MODE (op1) != VOIDmode)
4050 mode = GET_MODE (op1);
4051 else
4052 return 0;
4055 /* We can't simplify MODE_CC values since we don't know what the
4056 actual comparison is. */
4057 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4058 return 0;
4060 /* Make sure the constant is second. */
4061 if (swap_commutative_operands_p (op0, op1))
4063 tem = op0, op0 = op1, op1 = tem;
4064 code = swap_condition (code);
4067 trueop0 = avoid_constant_pool_reference (op0);
4068 trueop1 = avoid_constant_pool_reference (op1);
4070 /* For integer comparisons of A and B maybe we can simplify A - B and can
4071 then simplify a comparison of that with zero. If A and B are both either
4072 a register or a CONST_INT, this can't help; testing for these cases will
4073 prevent infinite recursion here and speed things up.
4075 We can only do this for EQ and NE comparisons, as otherwise we may
4076 lose or introduce overflow that we cannot disregard as undefined,
4077 since we do not know the signedness of the operation on either the
4078 left or the right hand side of the comparison. */
4080 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4081 && (code == EQ || code == NE)
4082 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
4083 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
4084 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4085 /* We cannot do this if tem is a nonzero address. */
4086 && ! nonzero_address_p (tem))
4087 return simplify_const_relational_operation (signed_condition (code),
4088 mode, tem, const0_rtx);
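/* For example, (eq (plus x (const_int 1)) (plus x (const_int 2)))
   is handled here: the MINUS folds to (const_int -1), and the recursive
   call then folds the whole comparison to const0_rtx.  */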
4090 if (! HONOR_NANS (mode) && code == ORDERED)
4091 return const_true_rtx;
4093 if (! HONOR_NANS (mode) && code == UNORDERED)
4094 return const0_rtx;
4096 /* For modes without NaNs, if the two operands are equal, we know the
4097 result except if they have side-effects. Even with NaNs we know
4098 the result of unordered comparisons and, if signaling NaNs are
4099 irrelevant, also the result of LT/GT/LTGT. */
4100 if ((! HONOR_NANS (GET_MODE (trueop0))
4101 || code == UNEQ || code == UNLE || code == UNGE
4102 || ((code == LT || code == GT || code == LTGT)
4103 && ! HONOR_SNANS (GET_MODE (trueop0))))
4104 && rtx_equal_p (trueop0, trueop1)
4105 && ! side_effects_p (trueop0))
4106 return comparison_result (code, CMP_EQ);
4108 /* If the operands are floating-point constants, see if we can fold
4109 the result. */
4110 if (GET_CODE (trueop0) == CONST_DOUBLE
4111 && GET_CODE (trueop1) == CONST_DOUBLE
4112 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4114 REAL_VALUE_TYPE d0, d1;
4116 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4117 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4119 /* Comparisons are unordered iff at least one of the values is NaN. */
4120 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4121 switch (code)
4123 case UNEQ:
4124 case UNLT:
4125 case UNGT:
4126 case UNLE:
4127 case UNGE:
4128 case NE:
4129 case UNORDERED:
4130 return const_true_rtx;
4131 case EQ:
4132 case LT:
4133 case GT:
4134 case LE:
4135 case GE:
4136 case LTGT:
4137 case ORDERED:
4138 return const0_rtx;
4139 default:
4140 return 0;
4143 return comparison_result (code,
4144 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4145 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4148 /* Otherwise, see if the operands are both integers. */
4149 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4150 && (GET_CODE (trueop0) == CONST_DOUBLE
4151 || GET_CODE (trueop0) == CONST_INT)
4152 && (GET_CODE (trueop1) == CONST_DOUBLE
4153 || GET_CODE (trueop1) == CONST_INT))
4155 int width = GET_MODE_BITSIZE (mode);
4156 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4157 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4159 /* Get the two words comprising each integer constant. */
4160 if (GET_CODE (trueop0) == CONST_DOUBLE)
4162 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4163 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4165 else
4167 l0u = l0s = INTVAL (trueop0);
4168 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4171 if (GET_CODE (trueop1) == CONST_DOUBLE)
4173 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4174 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4176 else
4178 l1u = l1s = INTVAL (trueop1);
4179 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4182 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4183 we have to sign or zero-extend the values. */
4184 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4186 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4187 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4189 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4190 l0s |= ((HOST_WIDE_INT) (-1) << width);
4192 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4193 l1s |= ((HOST_WIDE_INT) (-1) << width);
4195 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4196 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
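/* For example, with QImode operands (WIDTH == 8): an operand whose
   INTVAL is 0xff is reduced to l0u == 0xff, l0s == -1, h0u == 0 and
   h0s == -1, so below (lt (const_int 0xff) (const_int 1)) folds to
   const_true_rtx while (ltu ...) of the same operands folds to
   const0_rtx.  */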
4198 if (h0u == h1u && l0u == l1u)
4199 return comparison_result (code, CMP_EQ);
4200 else
4202 int cr;
4203 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4204 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4205 return comparison_result (code, cr);
4209 /* Optimize comparisons with upper and lower bounds. */
4210 if (SCALAR_INT_MODE_P (mode)
4211 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4212 && GET_CODE (trueop1) == CONST_INT)
4214 int sign;
4215 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4216 HOST_WIDE_INT val = INTVAL (trueop1);
4217 HOST_WIDE_INT mmin, mmax;
4219 if (code == GEU
4220 || code == LEU
4221 || code == GTU
4222 || code == LTU)
4223 sign = 0;
4224 else
4225 sign = 1;
4227 /* Get a reduced range if the sign bit is zero. */
4228 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4230 mmin = 0;
4231 mmax = nonzero;
4233 else
4235 rtx mmin_rtx, mmax_rtx;
4236 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4237 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4239 /* Since unsigned mmin will never be interpreted as negative, use
4240 INTVAL (and an arithmetic right shift). */
4241 mmin = INTVAL (mmin_rtx) >> (sign_copies - 1);
4242 /* Since signed mmax will always be positive, use UINTVAL (and
4243 a logical right shift). */
4244 mmax = UINTVAL (mmax_rtx) >> (sign_copies - 1);
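/* For example, if nonzero_bits shows that only the low four bits of
   TRUEOP0 can be set, then MMIN == 0 and MMAX == 15, so the switch
   below folds (gtu x (const_int 20)) to const0_rtx and
   (leu x (const_int 15)) to const_true_rtx.  */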
4247 switch (code)
4249 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4250 case GEU:
4251 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4252 return const_true_rtx;
4253 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4254 return const0_rtx;
4255 break;
4256 case GE:
4257 if (val <= mmin)
4258 return const_true_rtx;
4259 if (val > mmax)
4260 return const0_rtx;
4261 break;
4263 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4264 case LEU:
4265 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4266 return const_true_rtx;
4267 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4268 return const0_rtx;
4269 break;
4270 case LE:
4271 if (val >= mmax)
4272 return const_true_rtx;
4273 if (val < mmin)
4274 return const0_rtx;
4275 break;
4277 case EQ:
4278 /* x == y is always false for y out of range. */
4279 if (val < mmin || val > mmax)
4280 return const0_rtx;
4281 break;
4283 /* x > y is always false for y >= mmax, always true for y < mmin. */
4284 case GTU:
4285 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4286 return const0_rtx;
4287 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4288 return const_true_rtx;
4289 break;
4290 case GT:
4291 if (val >= mmax)
4292 return const0_rtx;
4293 if (val < mmin)
4294 return const_true_rtx;
4295 break;
4297 /* x < y is always false for y <= mmin, always true for y > mmax. */
4298 case LTU:
4299 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4300 return const0_rtx;
4301 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4302 return const_true_rtx;
4303 break;
4304 case LT:
4305 if (val <= mmin)
4306 return const0_rtx;
4307 if (val > mmax)
4308 return const_true_rtx;
4309 break;
4311 case NE:
4312 /* x != y is always true for y out of range. */
4313 if (val < mmin || val > mmax)
4314 return const_true_rtx;
4315 break;
4317 default:
4318 break;
4322 /* Optimize integer comparisons with zero. */
4323 if (trueop1 == const0_rtx)
4325 /* Some addresses are known to be nonzero. We don't know
4326 their sign, but equality comparisons are known. */
4327 if (nonzero_address_p (trueop0))
4329 if (code == EQ || code == LEU)
4330 return const0_rtx;
4331 if (code == NE || code == GTU)
4332 return const_true_rtx;
4335 /* See if the first operand is an IOR with a constant. If so, we
4336 may be able to determine the result of this comparison. */
4337 if (GET_CODE (op0) == IOR)
4339 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4340 if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx)
4342 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4343 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4344 && (INTVAL (inner_const)
4345 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4347 switch (code)
4349 case EQ:
4350 case LEU:
4351 return const0_rtx;
4352 case NE:
4353 case GTU:
4354 return const_true_rtx;
4355 case LT:
4356 case LE:
4357 if (has_sign)
4358 return const_true_rtx;
4359 break;
4360 case GT:
4361 case GE:
4362 if (has_sign)
4363 return const0_rtx;
4364 break;
4365 default:
4366 break;
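/* For example, the switch above folds
   (eq (ior x (const_int 4)) (const_int 0)) to const0_rtx, since bit 2
   of the IOR result is known to be set; and with the sign bit set in
   the constant, (lt (ior x (const_int -8)) (const_int 0)) folds to
   const_true_rtx.  */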
4372 /* Optimize comparison of ABS with zero. */
4373 if (trueop1 == CONST0_RTX (mode)
4374 && (GET_CODE (trueop0) == ABS
4375 || (GET_CODE (trueop0) == FLOAT_EXTEND
4376 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4378 switch (code)
4380 case LT:
4381 /* Optimize abs(x) < 0.0. */
4382 if (!HONOR_SNANS (mode)
4383 && (!INTEGRAL_MODE_P (mode)
4384 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4386 if (INTEGRAL_MODE_P (mode)
4387 && (issue_strict_overflow_warning
4388 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4389 warning (OPT_Wstrict_overflow,
4390 ("assuming signed overflow does not occur when "
4391 "assuming abs (x) < 0 is false"));
4392 return const0_rtx;
4394 break;
4396 case GE:
4397 /* Optimize abs(x) >= 0.0. */
4398 if (!HONOR_NANS (mode)
4399 && (!INTEGRAL_MODE_P (mode)
4400 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4402 if (INTEGRAL_MODE_P (mode)
4403 && (issue_strict_overflow_warning
4404 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4405 warning (OPT_Wstrict_overflow,
4406 ("assuming signed overflow does not occur when "
4407 "assuming abs (x) >= 0 is true"));
4408 return const_true_rtx;
4410 break;
4412 case UNGE:
4413 /* Optimize ! (abs(x) < 0.0). */
4414 return const_true_rtx;
4416 default:
4417 break;
4421 return 0;
4424 /* Simplify CODE, an operation with result mode MODE and three operands,
4425 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4426 a constant.  Return 0 if no simplification is possible. */
4429 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4430 enum machine_mode op0_mode, rtx op0, rtx op1,
4431 rtx op2)
4433 unsigned int width = GET_MODE_BITSIZE (mode);
4435 /* VOIDmode means "infinite" precision. */
4436 if (width == 0)
4437 width = HOST_BITS_PER_WIDE_INT;
4439 switch (code)
4441 case SIGN_EXTRACT:
4442 case ZERO_EXTRACT:
4443 if (GET_CODE (op0) == CONST_INT
4444 && GET_CODE (op1) == CONST_INT
4445 && GET_CODE (op2) == CONST_INT
4446 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4447 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4449 /* Extracting a bit-field from a constant */
4450 HOST_WIDE_INT val = INTVAL (op0);
4452 if (BITS_BIG_ENDIAN)
4453 val >>= (GET_MODE_BITSIZE (op0_mode)
4454 - INTVAL (op2) - INTVAL (op1));
4455 else
4456 val >>= INTVAL (op2);
4458 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4460 /* First zero-extend. */
4461 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4462 /* If desired, propagate sign bit. */
4463 if (code == SIGN_EXTRACT
4464 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4465 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4468 /* Clear the bits that don't belong in our mode,
4469 unless they and our sign bit are all one.
4470 So we get either a reasonable negative value or a reasonable
4471 unsigned value for this mode. */
4472 if (width < HOST_BITS_PER_WIDE_INT
4473 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4474 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4475 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4477 return gen_int_mode (val, mode);
4479 break;
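/* For example, with !BITS_BIG_ENDIAN,
   (zero_extract:SI (const_int 0xac) (const_int 4) (const_int 0))
   folds to (const_int 12), while the corresponding sign_extract folds
   to (const_int -4) because bit 3 of the extracted field is set.  */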
4481 case IF_THEN_ELSE:
4482 if (GET_CODE (op0) == CONST_INT)
4483 return op0 != const0_rtx ? op1 : op2;
4485 /* Convert c ? a : a into "a". */
4486 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4487 return op1;
4489 /* Convert a != b ? a : b into "a". */
4490 if (GET_CODE (op0) == NE
4491 && ! side_effects_p (op0)
4492 && ! HONOR_NANS (mode)
4493 && ! HONOR_SIGNED_ZEROS (mode)
4494 && ((rtx_equal_p (XEXP (op0, 0), op1)
4495 && rtx_equal_p (XEXP (op0, 1), op2))
4496 || (rtx_equal_p (XEXP (op0, 0), op2)
4497 && rtx_equal_p (XEXP (op0, 1), op1))))
4498 return op1;
4500 /* Convert a == b ? a : b into "b". */
4501 if (GET_CODE (op0) == EQ
4502 && ! side_effects_p (op0)
4503 && ! HONOR_NANS (mode)
4504 && ! HONOR_SIGNED_ZEROS (mode)
4505 && ((rtx_equal_p (XEXP (op0, 0), op1)
4506 && rtx_equal_p (XEXP (op0, 1), op2))
4507 || (rtx_equal_p (XEXP (op0, 0), op2)
4508 && rtx_equal_p (XEXP (op0, 1), op1))))
4509 return op2;
4511 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4513 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4514 ? GET_MODE (XEXP (op0, 1))
4515 : GET_MODE (XEXP (op0, 0)));
4516 rtx temp;
4518 /* Look for constants in op1 and op2 that let this IF_THEN_ELSE collapse to a comparison. */
4519 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4521 HOST_WIDE_INT t = INTVAL (op1);
4522 HOST_WIDE_INT f = INTVAL (op2);
4524 if (t == STORE_FLAG_VALUE && f == 0)
4525 code = GET_CODE (op0);
4526 else if (t == 0 && f == STORE_FLAG_VALUE)
4528 enum rtx_code tmp;
4529 tmp = reversed_comparison_code (op0, NULL_RTX);
4530 if (tmp == UNKNOWN)
4531 break;
4532 code = tmp;
4534 else
4535 break;
4537 return simplify_gen_relational (code, mode, cmp_mode,
4538 XEXP (op0, 0), XEXP (op0, 1));
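/* For example, with STORE_FLAG_VALUE == 1,
   (if_then_else (lt a b) (const_int 1) (const_int 0)) becomes (lt a b),
   and (if_then_else (lt a b) (const_int 0) (const_int 1)) becomes
   (ge a b), provided the comparison is reversible.  */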
4541 if (cmp_mode == VOIDmode)
4542 cmp_mode = op0_mode;
4543 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4544 cmp_mode, XEXP (op0, 0),
4545 XEXP (op0, 1));
4547 /* See if any simplifications were possible. */
4548 if (temp)
4550 if (GET_CODE (temp) == CONST_INT)
4551 return temp == const0_rtx ? op2 : op1;
4552 else if (temp)
4553 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4556 break;
4558 case VEC_MERGE:
4559 gcc_assert (GET_MODE (op0) == mode);
4560 gcc_assert (GET_MODE (op1) == mode);
4561 gcc_assert (VECTOR_MODE_P (mode));
4562 op2 = avoid_constant_pool_reference (op2);
4563 if (GET_CODE (op2) == CONST_INT)
4565 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4566 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4567 int mask = (1 << n_elts) - 1;
4569 if (!(INTVAL (op2) & mask))
4570 return op1;
4571 if ((INTVAL (op2) & mask) == mask)
4572 return op0;
4574 op0 = avoid_constant_pool_reference (op0);
4575 op1 = avoid_constant_pool_reference (op1);
4576 if (GET_CODE (op0) == CONST_VECTOR
4577 && GET_CODE (op1) == CONST_VECTOR)
4579 rtvec v = rtvec_alloc (n_elts);
4580 unsigned int i;
4582 for (i = 0; i < n_elts; i++)
4583 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4584 ? CONST_VECTOR_ELT (op0, i)
4585 : CONST_VECTOR_ELT (op1, i));
4586 return gen_rtx_CONST_VECTOR (mode, v);
4589 break;
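/* For example, merging two V4SI constant vectors with (const_int 5)
   (binary 0101) as the selector takes elements 0 and 2 from op0 and
   elements 1 and 3 from op1; a selector of 0 simply returns op1 and a
   selector of 15 returns op0.  */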
4591 default:
4592 gcc_unreachable ();
4595 return 0;
4598 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4599 or CONST_VECTOR,
4600 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4602 Works by unpacking OP into a collection of 8-bit values
4603 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4604 and then repacking them again for OUTERMODE. */
4606 static rtx
4607 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4608 enum machine_mode innermode, unsigned int byte)
4610 /* We support up to 512-bit values (for V8DFmode). */
4611 enum {
4612 max_bitsize = 512,
4613 value_bit = 8,
4614 value_mask = (1 << value_bit) - 1
4616 unsigned char value[max_bitsize / value_bit];
4617 int value_start;
4618 int i;
4619 int elem;
4621 int num_elem;
4622 rtx * elems;
4623 int elem_bitsize;
4624 rtx result_s;
4625 rtvec result_v = NULL;
4626 enum mode_class outer_class;
4627 enum machine_mode outer_submode;
4629 /* Some ports misuse CCmode. */
4630 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4631 return op;
4633 /* We have no way to represent a complex constant at the rtl level. */
4634 if (COMPLEX_MODE_P (outermode))
4635 return NULL_RTX;
4637 /* Unpack the value. */
4639 if (GET_CODE (op) == CONST_VECTOR)
4641 num_elem = CONST_VECTOR_NUNITS (op);
4642 elems = &CONST_VECTOR_ELT (op, 0);
4643 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4645 else
4647 num_elem = 1;
4648 elems = &op;
4649 elem_bitsize = max_bitsize;
4651 /* If this asserts, it is too complicated; reducing value_bit may help. */
4652 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4653 /* I don't know how to handle endianness of sub-units. */
4654 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4656 for (elem = 0; elem < num_elem; elem++)
4658 unsigned char * vp;
4659 rtx el = elems[elem];
4661 /* Vectors are kept in target memory order. (This is probably
4662 a mistake.) */
4664 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4665 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4666 / BITS_PER_UNIT);
4667 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4668 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4669 unsigned bytele = (subword_byte % UNITS_PER_WORD
4670 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4671 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4674 switch (GET_CODE (el))
4676 case CONST_INT:
4677 for (i = 0;
4678 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4679 i += value_bit)
4680 *vp++ = INTVAL (el) >> i;
4681 /* CONST_INTs are always logically sign-extended. */
4682 for (; i < elem_bitsize; i += value_bit)
4683 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4684 break;
4686 case CONST_DOUBLE:
4687 if (GET_MODE (el) == VOIDmode)
4689 /* If this triggers, someone should have generated a
4690 CONST_INT instead. */
4691 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4693 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4694 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4695 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4697 *vp++
4698 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4699 i += value_bit;
4701 /* It shouldn't matter what's done here, so fill it with
4702 zero. */
4703 for (; i < elem_bitsize; i += value_bit)
4704 *vp++ = 0;
4706 else
4708 long tmp[max_bitsize / 32];
4709 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4711 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4712 gcc_assert (bitsize <= elem_bitsize);
4713 gcc_assert (bitsize % value_bit == 0);
4715 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4716 GET_MODE (el));
4718 /* real_to_target produces its result in words affected by
4719 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4720 and use WORDS_BIG_ENDIAN instead; see the documentation
4721 of SUBREG in rtl.texi. */
4722 for (i = 0; i < bitsize; i += value_bit)
4724 int ibase;
4725 if (WORDS_BIG_ENDIAN)
4726 ibase = bitsize - 1 - i;
4727 else
4728 ibase = i;
4729 *vp++ = tmp[ibase / 32] >> i % 32;
4732 /* It shouldn't matter what's done here, so fill it with
4733 zero. */
4734 for (; i < elem_bitsize; i += value_bit)
4735 *vp++ = 0;
4737 break;
4739 case CONST_FIXED:
4740 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4742 for (i = 0; i < elem_bitsize; i += value_bit)
4743 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4745 else
4747 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4748 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4749 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4750 i += value_bit)
4751 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4752 >> (i - HOST_BITS_PER_WIDE_INT);
4753 for (; i < elem_bitsize; i += value_bit)
4754 *vp++ = 0;
4756 break;
4758 default:
4759 gcc_unreachable ();
4763 /* Now, pick the right byte to start with. */
4764 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4765 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4766 will already have offset 0. */
4767 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4769 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4770 - byte);
4771 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4772 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4773 byte = (subword_byte % UNITS_PER_WORD
4774 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4777 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4778 so if it's become negative it will instead be very large.) */
4779 gcc_assert (byte < GET_MODE_SIZE (innermode));
4781 /* Convert from bytes to chunks of size value_bit. */
4782 value_start = byte * (BITS_PER_UNIT / value_bit);
4784 /* Re-pack the value. */
4786 if (VECTOR_MODE_P (outermode))
4788 num_elem = GET_MODE_NUNITS (outermode);
4789 result_v = rtvec_alloc (num_elem);
4790 elems = &RTVEC_ELT (result_v, 0);
4791 outer_submode = GET_MODE_INNER (outermode);
4793 else
4795 num_elem = 1;
4796 elems = &result_s;
4797 outer_submode = outermode;
4800 outer_class = GET_MODE_CLASS (outer_submode);
4801 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4803 gcc_assert (elem_bitsize % value_bit == 0);
4804 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4806 for (elem = 0; elem < num_elem; elem++)
4808 unsigned char *vp;
4810 /* Vectors are stored in target memory order. (This is probably
4811 a mistake.) */
4813 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4814 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4815 / BITS_PER_UNIT);
4816 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4817 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4818 unsigned bytele = (subword_byte % UNITS_PER_WORD
4819 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4820 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4823 switch (outer_class)
4825 case MODE_INT:
4826 case MODE_PARTIAL_INT:
4828 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4830 for (i = 0;
4831 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4832 i += value_bit)
4833 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4834 for (; i < elem_bitsize; i += value_bit)
4835 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4836 << (i - HOST_BITS_PER_WIDE_INT));
4838 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4839 know why. */
4840 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4841 elems[elem] = gen_int_mode (lo, outer_submode);
4842 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4843 elems[elem] = immed_double_const (lo, hi, outer_submode);
4844 else
4845 return NULL_RTX;
4847 break;
4849 case MODE_FLOAT:
4850 case MODE_DECIMAL_FLOAT:
4852 REAL_VALUE_TYPE r;
4853 long tmp[max_bitsize / 32];
4855 /* real_from_target wants its input in words affected by
4856 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4857 and use WORDS_BIG_ENDIAN instead; see the documentation
4858 of SUBREG in rtl.texi. */
4859 for (i = 0; i < max_bitsize / 32; i++)
4860 tmp[i] = 0;
4861 for (i = 0; i < elem_bitsize; i += value_bit)
4863 int ibase;
4864 if (WORDS_BIG_ENDIAN)
4865 ibase = elem_bitsize - 1 - i;
4866 else
4867 ibase = i;
4868 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4871 real_from_target (&r, tmp, outer_submode);
4872 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4874 break;
4876 case MODE_FRACT:
4877 case MODE_UFRACT:
4878 case MODE_ACCUM:
4879 case MODE_UACCUM:
4881 FIXED_VALUE_TYPE f;
4882 f.data.low = 0;
4883 f.data.high = 0;
4884 f.mode = outer_submode;
4886 for (i = 0;
4887 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4888 i += value_bit)
4889 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4890 for (; i < elem_bitsize; i += value_bit)
4891 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4892 << (i - HOST_BITS_PER_WIDE_INT));
4894 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4896 break;
4898 default:
4899 gcc_unreachable ();
4902 if (VECTOR_MODE_P (outermode))
4903 return gen_rtx_CONST_VECTOR (outermode, result_v);
4904 else
4905 return result_s;
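/* Illustrative sketch (not part of the original sources; names are
   hypothetical): the unpack/repack idea above, reduced to taking the
   16-bit lowpart of a plain 32-bit value with value_bit == 8 chunks
   and no endianness or vector handling.  */
#if 0
static unsigned int
example_lowpart_16 (unsigned int x)
{
  unsigned char value[4];
  int i;

  /* Unpack X into 8-bit chunks, least significant chunk first.  */
  for (i = 0; i < 4; i++)
    value[i] = (x >> (i * 8)) & 0xff;

  /* Repack the chunks starting at byte 0 into the narrower result.  */
  return value[0] | ((unsigned int) value[1] << 8);
}
#endif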
4908 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4909 Return 0 if no simplifications are possible. */
4911 simplify_subreg (enum machine_mode outermode, rtx op,
4912 enum machine_mode innermode, unsigned int byte)
4914 /* Little bit of sanity checking. */
4915 gcc_assert (innermode != VOIDmode);
4916 gcc_assert (outermode != VOIDmode);
4917 gcc_assert (innermode != BLKmode);
4918 gcc_assert (outermode != BLKmode);
4920 gcc_assert (GET_MODE (op) == innermode
4921 || GET_MODE (op) == VOIDmode);
4923 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4924 gcc_assert (byte < GET_MODE_SIZE (innermode));
4926 if (outermode == innermode && !byte)
4927 return op;
4929 if (GET_CODE (op) == CONST_INT
4930 || GET_CODE (op) == CONST_DOUBLE
4931 || GET_CODE (op) == CONST_FIXED
4932 || GET_CODE (op) == CONST_VECTOR)
4933 return simplify_immed_subreg (outermode, op, innermode, byte);
4935 /* Changing mode twice with SUBREG => just change it once,
4936 or not at all if changing back to op's starting mode. */
4937 if (GET_CODE (op) == SUBREG)
4939 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4940 int final_offset = byte + SUBREG_BYTE (op);
4941 rtx newx;
4943 if (outermode == innermostmode
4944 && byte == 0 && SUBREG_BYTE (op) == 0)
4945 return SUBREG_REG (op);
4947 /* The SUBREG_BYTE represents the offset, as if the value were stored
4948 in memory.  An irritating exception is a paradoxical subreg, where
4949 we define SUBREG_BYTE to be 0.  On big-endian machines, this
4950 value should really be negative.  For a moment, undo this exception. */
4951 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4953 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4954 if (WORDS_BIG_ENDIAN)
4955 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4956 if (BYTES_BIG_ENDIAN)
4957 final_offset += difference % UNITS_PER_WORD;
4959 if (SUBREG_BYTE (op) == 0
4960 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4962 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4963 if (WORDS_BIG_ENDIAN)
4964 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4965 if (BYTES_BIG_ENDIAN)
4966 final_offset += difference % UNITS_PER_WORD;
4969 /* See whether resulting subreg will be paradoxical. */
4970 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4972 /* In nonparadoxical subregs we can't handle negative offsets. */
4973 if (final_offset < 0)
4974 return NULL_RTX;
4975 /* Bail out in case resulting subreg would be incorrect. */
4976 if (final_offset % GET_MODE_SIZE (outermode)
4977 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4978 return NULL_RTX;
4980 else
4982 int offset = 0;
4983 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4985 /* In a paradoxical subreg, see if we are still looking at the lower part.
4986 If so, our SUBREG_BYTE will be 0. */
4987 if (WORDS_BIG_ENDIAN)
4988 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4989 if (BYTES_BIG_ENDIAN)
4990 offset += difference % UNITS_PER_WORD;
4991 if (offset == final_offset)
4992 final_offset = 0;
4993 else
4994 return NULL_RTX;
4997 /* Recurse for further possible simplifications. */
4998 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4999 final_offset);
5000 if (newx)
5001 return newx;
5002 if (validate_subreg (outermode, innermostmode,
5003 SUBREG_REG (op), final_offset))
5004 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5005 return NULL_RTX;
5008 /* Merge implicit and explicit truncations. */
5010 if (GET_CODE (op) == TRUNCATE
5011 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5012 && subreg_lowpart_offset (outermode, innermode) == byte)
5013 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5014 GET_MODE (XEXP (op, 0)));
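/* For example, when BYTE selects the low part,
   (subreg:QI (truncate:HI (reg:SI x)) byte) becomes
   (truncate:QI (reg:SI x)), merging the two truncations.  */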
5016 /* SUBREG of a hard register => just change the register number
5017 and/or mode. If the hard register is not valid in that mode,
5018 suppress this simplification. If the hard register is the stack,
5019 frame, or argument pointer, leave this as a SUBREG. */
5021 if (REG_P (op)
5022 && REGNO (op) < FIRST_PSEUDO_REGISTER
5023 #ifdef CANNOT_CHANGE_MODE_CLASS
5024 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
5025 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
5026 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
5027 #endif
5028 && ((reload_completed && !frame_pointer_needed)
5029 || (REGNO (op) != FRAME_POINTER_REGNUM
5030 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
5031 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
5032 #endif
5034 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
5035 && REGNO (op) != ARG_POINTER_REGNUM
5036 #endif
5037 && REGNO (op) != STACK_POINTER_REGNUM
5038 && subreg_offset_representable_p (REGNO (op), innermode,
5039 byte, outermode))
5041 unsigned int regno = REGNO (op);
5042 unsigned int final_regno
5043 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
5045 /* ??? We do allow it if the current REG is not valid for
5046 its mode. This is a kludge to work around how float/complex
5047 arguments are passed on 32-bit SPARC and should be fixed. */
5048 if (HARD_REGNO_MODE_OK (final_regno, outermode)
5049 || ! HARD_REGNO_MODE_OK (regno, innermode))
5051 rtx x;
5052 int final_offset = byte;
5054 /* Adjust offset for paradoxical subregs. */
5055 if (byte == 0
5056 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5058 int difference = (GET_MODE_SIZE (innermode)
5059 - GET_MODE_SIZE (outermode));
5060 if (WORDS_BIG_ENDIAN)
5061 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5062 if (BYTES_BIG_ENDIAN)
5063 final_offset += difference % UNITS_PER_WORD;
5066 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5068 /* Propagate the original regno.  We don't have any way to specify
5069 the offset inside the original regno, so do so only for the lowpart.
5070 The information is used only by alias analysis, which cannot
5071 grok a partial register anyway. */
5073 if (subreg_lowpart_offset (outermode, innermode) == byte)
5074 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5075 return x;
5079 /* If we have a SUBREG of a register that we are replacing and we are
5080 replacing it with a MEM, make a new MEM and try replacing the
5081 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5082 or if we would be widening it. */
5084 if (MEM_P (op)
5085 && ! mode_dependent_address_p (XEXP (op, 0))
5086 /* Allow splitting of volatile memory references in case we don't
5087 have an instruction to move the whole thing. */
5088 && (! MEM_VOLATILE_P (op)
5089 || ! have_insn_for (SET, innermode))
5090 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5091 return adjust_address_nv (op, outermode, byte);
5093 /* Handle complex values represented as CONCAT
5094 of real and imaginary part. */
5095 if (GET_CODE (op) == CONCAT)
5097 unsigned int part_size, final_offset;
5098 rtx part, res;
5100 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5101 if (byte < part_size)
5103 part = XEXP (op, 0);
5104 final_offset = byte;
5106 else
5108 part = XEXP (op, 1);
5109 final_offset = byte - part_size;
5112 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5113 return NULL_RTX;
5115 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5116 if (res)
5117 return res;
5118 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5119 return gen_rtx_SUBREG (outermode, part, final_offset);
5120 return NULL_RTX;
5123 /* Optimize SUBREG truncations of zero and sign extended values. */
5124 if ((GET_CODE (op) == ZERO_EXTEND
5125 || GET_CODE (op) == SIGN_EXTEND)
5126 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5128 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5130 /* If we're requesting the lowpart of a zero or sign extension,
5131 there are three possibilities. If the outermode is the same
5132 as the origmode, we can omit both the extension and the subreg.
5133 If the outermode is not larger than the origmode, we can apply
5134 the truncation without the extension. Finally, if the outermode
5135 is larger than the origmode, but both are integer modes, we
5136 can just extend to the appropriate mode. */
5137 if (bitpos == 0)
5139 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5140 if (outermode == origmode)
5141 return XEXP (op, 0);
5142 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5143 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5144 subreg_lowpart_offset (outermode,
5145 origmode));
5146 if (SCALAR_INT_MODE_P (outermode))
5147 return simplify_gen_unary (GET_CODE (op), outermode,
5148 XEXP (op, 0), origmode);
5151 /* A SUBREG resulting from a zero extension may fold to zero if
5152 it extracts higher bits than the ZERO_EXTEND's source provides. */
5153 if (GET_CODE (op) == ZERO_EXTEND
5154 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5155 return CONST0_RTX (outermode);
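/* For example, for the lowpart case above (BITPOS == 0) applied to
   (zero_extend:DI (reg:QI x)): an outer mode of QImode gives back
   (reg:QI x), HImode gives (zero_extend:HI (reg:QI x)), and SImode
   gives (zero_extend:SI (reg:QI x)).  */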
5158 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5159 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5160 the outer subreg is effectively a truncation to the original mode. */
5161 if ((GET_CODE (op) == LSHIFTRT
5162 || GET_CODE (op) == ASHIFTRT)
5163 && SCALAR_INT_MODE_P (outermode)
5164 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5165 to avoid the possibility that an outer LSHIFTRT shifts by more
5166 than the sign extension's sign_bit_copies and introduces zeros
5167 into the high bits of the result. */
5168 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5169 && GET_CODE (XEXP (op, 1)) == CONST_INT
5170 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5171 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5172 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5173 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5174 return simplify_gen_binary (ASHIFTRT, outermode,
5175 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5177 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5178 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5179 the outer subreg is effectively a truncation to the original mode. */
5180 if ((GET_CODE (op) == LSHIFTRT
5181 || GET_CODE (op) == ASHIFTRT)
5182 && SCALAR_INT_MODE_P (outermode)
5183 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5184 && GET_CODE (XEXP (op, 1)) == CONST_INT
5185 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5186 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5187 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5188 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5189 return simplify_gen_binary (LSHIFTRT, outermode,
5190 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5192 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5193 into (ashift:QI (x:QI) C), where C is a suitable small constant and
5194 the outer subreg is effectively a truncation to the original mode. */
5195 if (GET_CODE (op) == ASHIFT
5196 && SCALAR_INT_MODE_P (outermode)
5197 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5198 && GET_CODE (XEXP (op, 1)) == CONST_INT
5199 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5200 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5201 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5202 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5203 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5204 return simplify_gen_binary (ASHIFT, outermode,
5205 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5207 return NULL_RTX;
5210 /* Make a SUBREG operation or equivalent if it folds. */
5213 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5214 enum machine_mode innermode, unsigned int byte)
5216 rtx newx;
5218 newx = simplify_subreg (outermode, op, innermode, byte);
5219 if (newx)
5220 return newx;
5222 if (GET_CODE (op) == SUBREG
5223 || GET_CODE (op) == CONCAT
5224 || GET_MODE (op) == VOIDmode)
5225 return NULL_RTX;
5227 if (validate_subreg (outermode, innermode, op, byte))
5228 return gen_rtx_SUBREG (outermode, op, byte);
5230 return NULL_RTX;
5233 /* Simplify X, an rtx expression.
5235 Return the simplified expression or NULL if no simplifications
5236 were possible.
5238 This is the preferred entry point into the simplification routines;
5239 however, we still allow passes to call the more specific routines.
5241 Right now GCC has three (yes, three) major bodies of RTL simplification
5242 code that need to be unified.
5244 1. fold_rtx in cse.c. This code uses various CSE specific
5245 information to aid in RTL simplification.
5247 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5248 it uses combine specific information to aid in RTL
5249 simplification.
5251 3. The routines in this file.
5254 Long term we want to only have one body of simplification code; to
5255 get to that state I recommend the following steps:
5257 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5258 which are not pass dependent state into these routines.
5260 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5261 use this routine whenever possible.
5263 3. Allow for pass dependent state to be provided to these
5264 routines and add simplifications based on the pass dependent
5265 state. Remove code from cse.c & combine.c that becomes
5266 redundant/dead.
5268 It will take time, but ultimately the compiler will be easier to
5269 maintain and improve.  It's totally silly that when we add a
5270 simplification it needs to be added to 4 places (3 for RTL
5271 simplification and 1 for tree simplification). */
5274 simplify_rtx (const_rtx x)
5276 const enum rtx_code code = GET_CODE (x);
5277 const enum machine_mode mode = GET_MODE (x);
5279 switch (GET_RTX_CLASS (code))
5281 case RTX_UNARY:
5282 return simplify_unary_operation (code, mode,
5283 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5284 case RTX_COMM_ARITH:
5285 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5286 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5288 /* Fall through.... */
5290 case RTX_BIN_ARITH:
5291 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5293 case RTX_TERNARY:
5294 case RTX_BITFIELD_OPS:
5295 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5296 XEXP (x, 0), XEXP (x, 1),
5297 XEXP (x, 2));
5299 case RTX_COMPARE:
5300 case RTX_COMM_COMPARE:
5301 return simplify_relational_operation (code, mode,
5302 ((GET_MODE (XEXP (x, 0))
5303 != VOIDmode)
5304 ? GET_MODE (XEXP (x, 0))
5305 : GET_MODE (XEXP (x, 1))),
5306 XEXP (x, 0),
5307 XEXP (x, 1));
5309 case RTX_EXTRA:
5310 if (code == SUBREG)
5311 return simplify_subreg (mode, SUBREG_REG (x),
5312 GET_MODE (SUBREG_REG (x)),
5313 SUBREG_BYTE (x));
5314 break;
5316 case RTX_OBJ:
5317 if (code == LO_SUM)
5319 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5320 if (GET_CODE (XEXP (x, 0)) == HIGH
5321 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5322 return XEXP (x, 1);
5324 break;
5326 default:
5327 break;
5329 return NULL;
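/* Illustrative sketch (not part of the original sources): typical use of
   the simplify_rtx entry point above, e.g. folding a constant addition:

       rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
       rtx folded = simplify_rtx (sum);     => folded is (const_int 5)

   A NULL return means no simplification was found and the caller should
   keep the original expression.  */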