gcc/simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
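/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 5) is 0, so a
   pair built as (lv, HWI_SIGN_EXTEND (lv)) represents LV sign-extended
   to double width.  */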
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
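/* For instance, with 32-bit SImode and a 64-bit HOST_WIDE_INT this
   returns true only for the CONST_INT whose low 32 bits are 0x80000000
   (the canonical (const_int -2147483648)), i.e. the immediate with just
   the SImode sign bit set.  */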
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
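/* E.g. simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X
   through the PLUS rules below, while a pair that does not fold simply
   yields a freshly generated rtx with the operands in canonical
   order.  */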
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
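/* Typical effect: if X is a (mem/u ...) load of an SFmode constant-pool
   entry holding 1.0, the SYMBOL_REF case above returns the CONST_DOUBLE
   for 1.0, so callers can fold operations on constant-pool loads.  */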
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
212 rtx tem;
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
218 return gen_rtx_fmt_e (code, mode, op);
221 /* Likewise for ternary operations. */
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
227 rtx tem;
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
244 rtx tem;
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
268 if (x == old_rtx)
269 return new_rtx;
271 switch (GET_RTX_CLASS (code))
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
325 break;
327 case RTX_OBJ:
328 if (code == MEM)
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
335 else if (code == LO_SUM)
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
348 else if (code == REG)
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
353 break;
355 default:
356 break;
358 return x;
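/* For example, replacing (reg:SI 60) with (const_int 8) in
   (plus:SI (reg:SI 60) (const_int 4)) rebuilds the PLUS through
   simplify_gen_binary and therefore returns (const_int 12).  */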
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
368 rtx trueop, tem;
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
373 trueop = avoid_constant_pool_reference (op);
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
379 return simplify_unary_operation_1 (code, mode, op);
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 enum rtx_code reversed;
388 rtx temp;
390 switch (code)
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
498 break;
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
589 enum machine_mode inner = GET_MODE (XEXP (op, 0));
590 int isize = GET_MODE_BITSIZE (inner);
591 if (STORE_FLAG_VALUE == 1)
593 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
594 GEN_INT (isize - 1));
595 if (mode == inner)
596 return temp;
597 if (GET_MODE_BITSIZE (mode) > isize)
598 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
599 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
601 else if (STORE_FLAG_VALUE == -1)
603 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
604 GEN_INT (isize - 1));
605 if (mode == inner)
606 return temp;
607 if (GET_MODE_BITSIZE (mode) > isize)
608 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
609 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
612 break;
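      /* Concretely, with 32-bit SImode and STORE_FLAG_VALUE == 1 the rule
	 above turns (neg:SI (lt:SI (reg:SI x) (const_int 0))) into
	 (ashiftrt:SI (reg:SI x) (const_int 31)), broadcasting the sign bit
	 of x across the word.  */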
614 case TRUNCATE:
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
617 integer mode. */
618 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
619 break;
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op) == SIGN_EXTEND
623 || GET_CODE (op) == ZERO_EXTEND)
624 && GET_MODE (XEXP (op, 0)) == mode)
625 return XEXP (op, 0);
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op) == ABS
630 || GET_CODE (op) == NEG)
631 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
633 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
634 return simplify_gen_unary (GET_CODE (op), mode,
635 XEXP (XEXP (op, 0), 0), mode);
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
638 (truncate:A X). */
639 if (GET_CODE (op) == SUBREG
640 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
641 && subreg_lowpart_p (op))
642 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
643 GET_MODE (XEXP (SUBREG_REG (op), 0)));
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
651 patterns. */
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
653 GET_MODE_BITSIZE (GET_MODE (op)))
654 ? (num_sign_bit_copies (op, GET_MODE (op))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
656 - GET_MODE_BITSIZE (mode)))
657 : truncated_to_mode (mode, op))
658 && ! (GET_CODE (op) == LSHIFTRT
659 && GET_CODE (XEXP (op, 0)) == MULT))
660 return rtl_hooks.gen_lowpart_no_emit (mode, op);
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
667 && COMPARISON_P (op)
668 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
669 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 break;
672 case FLOAT_TRUNCATE:
673 if (DECIMAL_FLOAT_MODE_P (mode))
674 break;
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op, 0)) == mode)
679 return XEXP (op, 0);
      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
704 && ((unsigned)significand_size (GET_MODE (op))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
706 - num_sign_bit_copies (XEXP (op, 0),
707 GET_MODE (XEXP (op, 0))))))))
708 return simplify_gen_unary (FLOAT, mode,
709 XEXP (op, 0),
710 GET_MODE (XEXP (op, 0)));
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op) == ABS
715 || GET_CODE (op) == NEG)
716 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
718 return simplify_gen_unary (GET_CODE (op), mode,
719 XEXP (XEXP (op, 0), 0), mode);
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op) == SUBREG
724 && subreg_lowpart_p (op)
725 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
726 return SUBREG_REG (op);
727 break;
729 case FLOAT_EXTEND:
730 if (DECIMAL_FLOAT_MODE_P (mode))
731 break;
      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
738 if (GET_CODE (op) == FLOAT_EXTEND
739 || (GET_CODE (op) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
741 && ((unsigned)significand_size (GET_MODE (op))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
743 - num_sign_bit_copies (XEXP (op, 0),
744 GET_MODE (XEXP (op, 0)))))))
745 return simplify_gen_unary (GET_CODE (op), mode,
746 XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)));
749 break;
751 case ABS:
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op) == NEG)
754 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
755 GET_MODE (XEXP (op, 0)));
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
758 do nothing. */
759 if (GET_MODE (op) == VOIDmode)
760 break;
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op))
765 <= HOST_BITS_PER_WIDE_INT)
766 && ((nonzero_bits (op, GET_MODE (op))
767 & ((HOST_WIDE_INT) 1
768 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
769 == 0)))
770 return op;
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
774 return gen_rtx_NEG (mode, op);
776 break;
778 case FFS:
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op) == SIGN_EXTEND
781 || GET_CODE (op) == ZERO_EXTEND)
782 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
783 GET_MODE (XEXP (op, 0)));
784 break;
786 case POPCOUNT:
787 switch (GET_CODE (op))
789 case BSWAP:
790 case ZERO_EXTEND:
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
793 GET_MODE (XEXP (op, 0)));
795 case ROTATE:
796 case ROTATERT:
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op, 1)))
799 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
800 GET_MODE (XEXP (op, 0)));
801 break;
803 default:
804 break;
806 break;
808 case PARITY:
809 switch (GET_CODE (op))
811 case NOT:
812 case BSWAP:
813 case ZERO_EXTEND:
814 case SIGN_EXTEND:
815 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
818 case ROTATE:
819 case ROTATERT:
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op, 1)))
822 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
823 GET_MODE (XEXP (op, 0)));
824 break;
826 default:
827 break;
829 break;
831 case BSWAP:
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op) == BSWAP)
834 return XEXP (op, 0);
835 break;
837 case FLOAT:
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op) == SIGN_EXTEND)
840 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
841 GET_MODE (XEXP (op, 0)));
842 break;
844 case SIGN_EXTEND:
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
848 the VAX). */
849 if (GET_CODE (op) == TRUNCATE
850 && GET_MODE (XEXP (op, 0)) == mode
851 && GET_CODE (XEXP (op, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
854 return XEXP (op, 0);
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
862 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
863 return rtl_hooks.gen_lowpart_no_emit (mode, op);
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode == Pmode && GET_MODE (op) == ptr_mode
868 && (CONSTANT_P (op)
869 || (GET_CODE (op) == SUBREG
870 && REG_P (SUBREG_REG (op))
871 && REG_POINTER (SUBREG_REG (op))
872 && GET_MODE (SUBREG_REG (op)) == Pmode)))
873 return convert_memory_address (Pmode, op);
874 #endif
875 break;
877 case ZERO_EXTEND:
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op)
883 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
884 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
885 return rtl_hooks.gen_lowpart_no_emit (mode, op);
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED > 0
889 && mode == Pmode && GET_MODE (op) == ptr_mode
890 && (CONSTANT_P (op)
891 || (GET_CODE (op) == SUBREG
892 && REG_P (SUBREG_REG (op))
893 && REG_POINTER (SUBREG_REG (op))
894 && GET_MODE (SUBREG_REG (op)) == Pmode)))
895 return convert_memory_address (Pmode, op);
896 #endif
897 break;
899 default:
900 break;
903 return 0;
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
910 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
911 rtx op, enum machine_mode op_mode)
913 unsigned int width = GET_MODE_BITSIZE (mode);
915 if (code == VEC_DUPLICATE)
917 gcc_assert (VECTOR_MODE_P (mode));
918 if (GET_MODE (op) != VOIDmode)
920 if (!VECTOR_MODE_P (GET_MODE (op)))
921 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
922 else
923 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
924 (GET_MODE (op)));
926 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
927 || GET_CODE (op) == CONST_VECTOR)
929 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
930 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
931 rtvec v = rtvec_alloc (n_elts);
932 unsigned int i;
934 if (GET_CODE (op) != CONST_VECTOR)
935 for (i = 0; i < n_elts; i++)
936 RTVEC_ELT (v, i) = op;
937 else
939 enum machine_mode inmode = GET_MODE (op);
940 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
941 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
943 gcc_assert (in_n_elts < n_elts);
944 gcc_assert ((n_elts % in_n_elts) == 0);
945 for (i = 0; i < n_elts; i++)
946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
948 return gen_rtx_CONST_VECTOR (mode, v);
952 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
954 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
955 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
956 enum machine_mode opmode = GET_MODE (op);
957 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
958 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
959 rtvec v = rtvec_alloc (n_elts);
960 unsigned int i;
962 gcc_assert (op_n_elts == n_elts);
963 for (i = 0; i < n_elts; i++)
965 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
966 CONST_VECTOR_ELT (op, i),
967 GET_MODE_INNER (opmode));
968 if (!x)
969 return 0;
970 RTVEC_ELT (v, i) = x;
972 return gen_rtx_CONST_VECTOR (mode, v);
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
979 if (code == FLOAT && GET_MODE (op) == VOIDmode
980 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
982 HOST_WIDE_INT hv, lv;
983 REAL_VALUE_TYPE d;
985 if (GET_CODE (op) == CONST_INT)
986 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
987 else
988 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
990 REAL_VALUE_FROM_INT (d, lv, hv, mode);
991 d = real_value_truncate (mode, d);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
994 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
995 && (GET_CODE (op) == CONST_DOUBLE
996 || GET_CODE (op) == CONST_INT))
998 HOST_WIDE_INT hv, lv;
999 REAL_VALUE_TYPE d;
1001 if (GET_CODE (op) == CONST_INT)
1002 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1003 else
1004 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);
1018 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1019 d = real_value_truncate (mode, d);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1023 if (GET_CODE (op) == CONST_INT
1024 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1026 HOST_WIDE_INT arg0 = INTVAL (op);
1027 HOST_WIDE_INT val;
1029 switch (code)
1031 case NOT:
1032 val = ~ arg0;
1033 break;
1035 case NEG:
1036 val = - arg0;
1037 break;
1039 case ABS:
1040 val = (arg0 >= 0 ? arg0 : - arg0);
1041 break;
1043 case FFS:
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0 &= GET_MODE_MASK (mode);
1047 val = exact_log2 (arg0 & (- arg0)) + 1;
1048 break;
	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;
1071 case POPCOUNT:
1072 arg0 &= GET_MODE_MASK (mode);
1073 val = 0;
1074 while (arg0)
1075 val++, arg0 &= arg0 - 1;
1076 break;
1078 case PARITY:
1079 arg0 &= GET_MODE_MASK (mode);
1080 val = 0;
1081 while (arg0)
1082 val++, arg0 &= arg0 - 1;
1083 val &= 1;
1084 break;
1086 case BSWAP:
1088 unsigned int s;
1090 val = 0;
1091 for (s = 0; s < width; s += 8)
1093 unsigned int d = width - s - 8;
1094 unsigned HOST_WIDE_INT byte;
1095 byte = (arg0 >> s) & 0xff;
1096 val |= byte << d;
1099 break;
1101 case TRUNCATE:
1102 val = arg0;
1103 break;
1105 case ZERO_EXTEND:
1106 /* When zero-extending a CONST_INT, we need to know its
1107 original mode. */
1108 gcc_assert (op_mode != VOIDmode);
1109 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1115 val = arg0;
1117 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1118 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1119 else
1120 return 0;
1121 break;
1123 case SIGN_EXTEND:
1124 if (op_mode == VOIDmode)
1125 op_mode = mode;
1126 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1132 val = arg0;
      else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	{
	  val
	    = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  if (val
	      & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
	    val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	}
1142 else
1143 return 0;
1144 break;
1146 case SQRT:
1147 case FLOAT_EXTEND:
1148 case FLOAT_TRUNCATE:
1149 case SS_TRUNCATE:
1150 case US_TRUNCATE:
1151 case SS_NEG:
1152 case US_NEG:
1153 return 0;
1155 default:
1156 gcc_unreachable ();
1159 return gen_int_mode (val, mode);
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op) == VOIDmode
1165 && width <= HOST_BITS_PER_WIDE_INT * 2
1166 && (GET_CODE (op) == CONST_DOUBLE
1167 || GET_CODE (op) == CONST_INT))
1169 unsigned HOST_WIDE_INT l1, lv;
1170 HOST_WIDE_INT h1, hv;
1172 if (GET_CODE (op) == CONST_DOUBLE)
1173 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1174 else
1175 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1177 switch (code)
1179 case NOT:
1180 lv = ~ l1;
1181 hv = ~ h1;
1182 break;
1184 case NEG:
1185 neg_double (l1, h1, &lv, &hv);
1186 break;
1188 case ABS:
1189 if (h1 < 0)
1190 neg_double (l1, h1, &lv, &hv);
1191 else
1192 lv = l1, hv = h1;
1193 break;
1195 case FFS:
1196 hv = 0;
1197 if (l1 == 0)
1199 if (h1 == 0)
1200 lv = 0;
1201 else
1202 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1204 else
1205 lv = exact_log2 (l1 & -l1) + 1;
1206 break;
1208 case CLZ:
1209 hv = 0;
1210 if (h1 != 0)
1211 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1212 - HOST_BITS_PER_WIDE_INT;
1213 else if (l1 != 0)
1214 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1216 lv = GET_MODE_BITSIZE (mode);
1217 break;
1219 case CTZ:
1220 hv = 0;
1221 if (l1 != 0)
1222 lv = exact_log2 (l1 & -l1);
1223 else if (h1 != 0)
1224 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1226 lv = GET_MODE_BITSIZE (mode);
1227 break;
1229 case POPCOUNT:
1230 hv = 0;
1231 lv = 0;
1232 while (l1)
1233 lv++, l1 &= l1 - 1;
1234 while (h1)
1235 lv++, h1 &= h1 - 1;
1236 break;
1238 case PARITY:
1239 hv = 0;
1240 lv = 0;
1241 while (l1)
1242 lv++, l1 &= l1 - 1;
1243 while (h1)
1244 lv++, h1 &= h1 - 1;
1245 lv &= 1;
1246 break;
1248 case BSWAP:
1250 unsigned int s;
1252 hv = 0;
1253 lv = 0;
1254 for (s = 0; s < width; s += 8)
1256 unsigned int d = width - s - 8;
1257 unsigned HOST_WIDE_INT byte;
1259 if (s < HOST_BITS_PER_WIDE_INT)
1260 byte = (l1 >> s) & 0xff;
1261 else
1262 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1264 if (d < HOST_BITS_PER_WIDE_INT)
1265 lv |= byte << d;
1266 else
1267 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1270 break;
1272 case TRUNCATE:
1273 /* This is just a change-of-mode, so do nothing. */
1274 lv = l1, hv = h1;
1275 break;
1277 case ZERO_EXTEND:
1278 gcc_assert (op_mode != VOIDmode);
1280 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1281 return 0;
1283 hv = 0;
1284 lv = l1 & GET_MODE_MASK (op_mode);
1285 break;
1287 case SIGN_EXTEND:
1288 if (op_mode == VOIDmode
1289 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1290 return 0;
1291 else
1293 lv = l1 & GET_MODE_MASK (op_mode);
1294 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1295 && (lv & ((HOST_WIDE_INT) 1
1296 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1297 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1299 hv = HWI_SIGN_EXTEND (lv);
1301 break;
1303 case SQRT:
1304 return 0;
1306 default:
1307 return 0;
1310 return immed_double_const (lv, hv, mode);
1313 else if (GET_CODE (op) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode))
1316 REAL_VALUE_TYPE d, t;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1319 switch (code)
1321 case SQRT:
1322 if (HONOR_SNANS (mode) && real_isnan (&d))
1323 return 0;
1324 real_sqrt (&t, mode, &d);
1325 d = t;
1326 break;
1327 case ABS:
1328 d = REAL_VALUE_ABS (d);
1329 break;
1330 case NEG:
1331 d = REAL_VALUE_NEGATE (d);
1332 break;
1333 case FLOAT_TRUNCATE:
1334 d = real_value_truncate (mode, d);
1335 break;
1336 case FLOAT_EXTEND:
1337 /* All this does is change the mode. */
1338 break;
1339 case FIX:
1340 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1341 break;
1342 case NOT:
1344 long tmp[4];
1345 int i;
1347 real_to_target (tmp, &d, GET_MODE (op));
1348 for (i = 0; i < 4; i++)
1349 tmp[i] = ~tmp[i];
1350 real_from_target (&d, tmp, mode);
1351 break;
1353 default:
1354 gcc_unreachable ();
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1359 else if (GET_CODE (op) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1361 && GET_MODE_CLASS (mode) == MODE_INT
1362 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh, xl, th, tl;
1372 REAL_VALUE_TYPE x, t;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1374 switch (code)
1376 case FIX:
1377 if (REAL_VALUE_ISNAN (x))
1378 return const0_rtx;
1380 /* Test against the signed upper bound. */
1381 if (width > HOST_BITS_PER_WIDE_INT)
1383 th = ((unsigned HOST_WIDE_INT) 1
1384 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1385 tl = -1;
1387 else
1389 th = 0;
1390 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1392 real_from_integer (&t, VOIDmode, tl, th, 0);
1393 if (REAL_VALUES_LESS (t, x))
1395 xh = th;
1396 xl = tl;
1397 break;
1400 /* Test against the signed lower bound. */
1401 if (width > HOST_BITS_PER_WIDE_INT)
1403 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1404 tl = 0;
1406 else
1408 th = -1;
1409 tl = (HOST_WIDE_INT) -1 << (width - 1);
1411 real_from_integer (&t, VOIDmode, tl, th, 0);
1412 if (REAL_VALUES_LESS (x, t))
1414 xh = th;
1415 xl = tl;
1416 break;
1418 REAL_VALUE_TO_INT (&xl, &xh, x);
1419 break;
1421 case UNSIGNED_FIX:
1422 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1423 return const0_rtx;
1425 /* Test against the unsigned upper bound. */
1426 if (width == 2*HOST_BITS_PER_WIDE_INT)
1428 th = -1;
1429 tl = -1;
1431 else if (width >= HOST_BITS_PER_WIDE_INT)
1433 th = ((unsigned HOST_WIDE_INT) 1
1434 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1435 tl = -1;
1437 else
1439 th = 0;
1440 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1442 real_from_integer (&t, VOIDmode, tl, th, 1);
1443 if (REAL_VALUES_LESS (t, x))
1445 xh = th;
1446 xl = tl;
1447 break;
1450 REAL_VALUE_TO_INT (&xl, &xh, x);
1451 break;
1453 default:
1454 gcc_unreachable ();
1456 return immed_double_const (xl, xh, mode);
1459 return NULL_RTX;
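/* Two concrete foldings performed by this routine:
   (NEG, SImode, (const_int 9)) yields (const_int -9), and
   (POPCOUNT, SImode, (const_int 7)) yields (const_int 3).  */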
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1468 static rtx
1469 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1470 rtx op0, rtx op1)
1472 rtx tem;
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1) == code)
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0) == code)
1480 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1481 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1, op0))
1486 return simplify_gen_binary (code, mode, op1, op0);
1488 tem = op0;
1489 op0 = op1;
1490 op1 = tem;
1493 if (GET_CODE (op0) == code)
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1498 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1499 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1504 if (tem != 0)
1505 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1509 if (tem != 0)
1510 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1513 return 0;
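/* Example: with op0 = (plus:SI (reg:SI x) (const_int 1)) and
   op1 = (const_int 2), the "(a op b) op c" -> "a op (b op c)" step
   folds the two constants, so the call returns
   (plus:SI (reg:SI x) (const_int 3)).  */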
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1523 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op0, rtx op1)
1526 rtx trueop0, trueop1;
1527 rtx tem;
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1534 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0, op1))
1540 tem = op0, op0 = op1, op1 = tem;
1543 trueop0 = avoid_constant_pool_reference (op0);
1544 trueop1 = avoid_constant_pool_reference (op1);
1546 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1547 if (tem)
1548 return tem;
1549 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
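/* Constant operands are folded first, e.g.
   simplify_binary_operation (MULT, SImode, GEN_INT (6), GEN_INT (7))
   returns (const_int 42); anything else falls through to the
   pattern-based rules in simplify_binary_operation_1.  */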
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1557 static rtx
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1561 rtx tem, reversed, opleft, opright;
1562 HOST_WIDE_INT val;
1563 unsigned int width = GET_MODE_BITSIZE (mode);
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1568 switch (code)
1570 case PLUS:
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1576 return op0;
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1582 else if (GET_CODE (op1) == NEG)
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode)
1587 && GET_CODE (op0) == NOT
1588 && trueop1 == const1_rtx)
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1597 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1598 && GET_CODE (op1) == CONST_INT)
1599 return plus_constant (op0, INTVAL (op1));
1600 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1601 && GET_CODE (op0) == CONST_INT)
1602 return plus_constant (op1, INTVAL (op0));
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
1610 if (SCALAR_INT_MODE_P (mode))
1612 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1613 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1614 rtx lhs = op0, rhs = op1;
1616 if (GET_CODE (lhs) == NEG)
1618 coeff0l = -1;
1619 coeff0h = -1;
1620 lhs = XEXP (lhs, 0);
1622 else if (GET_CODE (lhs) == MULT
1623 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1625 coeff0l = INTVAL (XEXP (lhs, 1));
1626 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1627 lhs = XEXP (lhs, 0);
1629 else if (GET_CODE (lhs) == ASHIFT
1630 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs, 1)) >= 0
1632 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1634 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1635 coeff0h = 0;
1636 lhs = XEXP (lhs, 0);
1639 if (GET_CODE (rhs) == NEG)
1641 coeff1l = -1;
1642 coeff1h = -1;
1643 rhs = XEXP (rhs, 0);
1645 else if (GET_CODE (rhs) == MULT
1646 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1648 coeff1l = INTVAL (XEXP (rhs, 1));
1649 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1650 rhs = XEXP (rhs, 0);
1652 else if (GET_CODE (rhs) == ASHIFT
1653 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs, 1)) >= 0
1655 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1657 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1658 coeff1h = 0;
1659 rhs = XEXP (rhs, 0);
1662 if (rtx_equal_p (lhs, rhs))
1664 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1665 rtx coeff;
1666 unsigned HOST_WIDE_INT l;
1667 HOST_WIDE_INT h;
1668 bool speed = optimize_function_for_speed_p (cfun);
1670 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1671 coeff = immed_double_const (l, h, mode);
1673 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1674 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1675 ? tem : 0;
1679 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1680 if ((GET_CODE (op1) == CONST_INT
1681 || GET_CODE (op1) == CONST_DOUBLE)
1682 && GET_CODE (op0) == XOR
1683 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1684 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1685 && mode_signbit_p (mode, op1))
1686 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1687 simplify_gen_binary (XOR, mode, op1,
1688 XEXP (op0, 1)));
1690 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1691 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1692 && GET_CODE (op0) == MULT
1693 && GET_CODE (XEXP (op0, 0)) == NEG)
1695 rtx in1, in2;
1697 in1 = XEXP (XEXP (op0, 0), 0);
1698 in2 = XEXP (op0, 1);
1699 return simplify_gen_binary (MINUS, mode, op1,
1700 simplify_gen_binary (MULT, mode,
1701 in1, in2));
1704 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1705 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1706 is 1. */
1707 if (COMPARISON_P (op0)
1708 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1709 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1710 && (reversed = reversed_comparison (op0, mode)))
1711 return
1712 simplify_gen_unary (NEG, mode, reversed, mode);
1714 /* If one of the operands is a PLUS or a MINUS, see if we can
1715 simplify this by the associative law.
1716 Don't use the associative law for floating point.
1717 The inaccuracy makes it nonassociative,
1718 and subtle programs can break if operations are associated. */
1720 if (INTEGRAL_MODE_P (mode)
1721 && (plus_minus_operand_p (op0)
1722 || plus_minus_operand_p (op1))
1723 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1724 return tem;
1726 /* Reassociate floating point addition only when the user
1727 specifies associative math operations. */
1728 if (FLOAT_MODE_P (mode)
1729 && flag_associative_math)
1731 tem = simplify_associative_operation (code, mode, op0, op1);
1732 if (tem)
1733 return tem;
1735 break;
1737 case COMPARE:
1738 #ifdef HAVE_cc0
1739 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1740 using cc0, in which case we want to leave it as a COMPARE
1741 so we can distinguish it from a register-register-copy.
1743 In IEEE floating point, x-0 is not the same as x. */
1744 if (!(HONOR_SIGNED_ZEROS (mode)
1745 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1746 && trueop1 == CONST0_RTX (mode))
1747 return op0;
1748 #endif
1750 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1751 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1752 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1753 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1755 rtx xop00 = XEXP (op0, 0);
1756 rtx xop10 = XEXP (op1, 0);
1758 #ifdef HAVE_cc0
1759 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1760 #else
1761 if (REG_P (xop00) && REG_P (xop10)
1762 && GET_MODE (xop00) == GET_MODE (xop10)
1763 && REGNO (xop00) == REGNO (xop10)
1764 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1765 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1766 #endif
1767 return xop00;
1769 break;
1771 case MINUS:
1772 /* We can't assume x-x is 0 even with non-IEEE floating point,
1773 but since it is zero except in very strange circumstances, we
1774 will treat it as zero with -ffinite-math-only. */
1775 if (rtx_equal_p (trueop0, trueop1)
1776 && ! side_effects_p (op0)
1777 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1778 return CONST0_RTX (mode);
1780 /* Change subtraction from zero into negation. (0 - x) is the
1781 same as -x when x is NaN, infinite, or finite and nonzero.
1782 But if the mode has signed zeros, and does not round towards
1783 -infinity, then 0 - 0 is 0, not -0. */
1784 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1785 return simplify_gen_unary (NEG, mode, op1, mode);
1787 /* (-1 - a) is ~a. */
1788 if (trueop0 == constm1_rtx)
1789 return simplify_gen_unary (NOT, mode, op1, mode);
1791 /* Subtracting 0 has no effect unless the mode has signed zeros
1792 and supports rounding towards -infinity. In such a case,
1793 0 - 0 is -0. */
1794 if (!(HONOR_SIGNED_ZEROS (mode)
1795 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1796 && trueop1 == CONST0_RTX (mode))
1797 return op0;
1799 /* See if this is something like X * C - X or vice versa or
1800 if the multiplication is written as a shift. If so, we can
1801 distribute and make a new multiply, shift, or maybe just
1802 have X (if C is 2 in the example above). But don't make
1803 something more expensive than we had before. */
1805 if (SCALAR_INT_MODE_P (mode))
1807 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1808 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1809 rtx lhs = op0, rhs = op1;
1811 if (GET_CODE (lhs) == NEG)
1813 coeff0l = -1;
1814 coeff0h = -1;
1815 lhs = XEXP (lhs, 0);
1817 else if (GET_CODE (lhs) == MULT
1818 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1820 coeff0l = INTVAL (XEXP (lhs, 1));
1821 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1822 lhs = XEXP (lhs, 0);
1824 else if (GET_CODE (lhs) == ASHIFT
1825 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1826 && INTVAL (XEXP (lhs, 1)) >= 0
1827 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1829 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1830 coeff0h = 0;
1831 lhs = XEXP (lhs, 0);
1834 if (GET_CODE (rhs) == NEG)
1836 negcoeff1l = 1;
1837 negcoeff1h = 0;
1838 rhs = XEXP (rhs, 0);
1840 else if (GET_CODE (rhs) == MULT
1841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1843 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1844 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1845 rhs = XEXP (rhs, 0);
1847 else if (GET_CODE (rhs) == ASHIFT
1848 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1849 && INTVAL (XEXP (rhs, 1)) >= 0
1850 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1852 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1853 negcoeff1h = -1;
1854 rhs = XEXP (rhs, 0);
1857 if (rtx_equal_p (lhs, rhs))
1859 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1860 rtx coeff;
1861 unsigned HOST_WIDE_INT l;
1862 HOST_WIDE_INT h;
1863 bool speed = optimize_function_for_speed_p (cfun);
1865 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1866 coeff = immed_double_const (l, h, mode);
1868 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1869 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1870 ? tem : 0;
1874 /* (a - (-b)) -> (a + b). True even for IEEE. */
1875 if (GET_CODE (op1) == NEG)
1876 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1878 /* (-x - c) may be simplified as (-c - x). */
1879 if (GET_CODE (op0) == NEG
1880 && (GET_CODE (op1) == CONST_INT
1881 || GET_CODE (op1) == CONST_DOUBLE))
1883 tem = simplify_unary_operation (NEG, mode, op1, mode);
1884 if (tem)
1885 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1888 /* Don't let a relocatable value get a negative coeff. */
1889 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1890 return simplify_gen_binary (PLUS, mode,
1891 op0,
1892 neg_const_int (mode, op1));
1894 /* (x - (x & y)) -> (x & ~y) */
1895 if (GET_CODE (op1) == AND)
1897 if (rtx_equal_p (op0, XEXP (op1, 0)))
1899 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1900 GET_MODE (XEXP (op1, 1)));
1901 return simplify_gen_binary (AND, mode, op0, tem);
1903 if (rtx_equal_p (op0, XEXP (op1, 1)))
1905 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1906 GET_MODE (XEXP (op1, 0)));
1907 return simplify_gen_binary (AND, mode, op0, tem);
1911 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1912 by reversing the comparison code if valid. */
1913 if (STORE_FLAG_VALUE == 1
1914 && trueop0 == const1_rtx
1915 && COMPARISON_P (op1)
1916 && (reversed = reversed_comparison (op1, mode)))
1917 return reversed;
1919 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1920 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1921 && GET_CODE (op1) == MULT
1922 && GET_CODE (XEXP (op1, 0)) == NEG)
1924 rtx in1, in2;
1926 in1 = XEXP (XEXP (op1, 0), 0);
1927 in2 = XEXP (op1, 1);
1928 return simplify_gen_binary (PLUS, mode,
1929 simplify_gen_binary (MULT, mode,
1930 in1, in2),
1931 op0);
1934 /* Canonicalize (minus (neg A) (mult B C)) to
1935 (minus (mult (neg B) C) A). */
1936 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1937 && GET_CODE (op1) == MULT
1938 && GET_CODE (op0) == NEG)
1940 rtx in1, in2;
1942 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1943 in2 = XEXP (op1, 1);
1944 return simplify_gen_binary (MINUS, mode,
1945 simplify_gen_binary (MULT, mode,
1946 in1, in2),
1947 XEXP (op0, 0));
1950 /* If one of the operands is a PLUS or a MINUS, see if we can
1951 simplify this by the associative law. This will, for example,
1952 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1953 Don't use the associative law for floating point.
1954 The inaccuracy makes it nonassociative,
1955 and subtle programs can break if operations are associated. */
1957 if (INTEGRAL_MODE_P (mode)
1958 && (plus_minus_operand_p (op0)
1959 || plus_minus_operand_p (op1))
1960 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1961 return tem;
1962 break;
1964 case MULT:
1965 if (trueop1 == constm1_rtx)
1966 return simplify_gen_unary (NEG, mode, op0, mode);
1968 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1969 x is NaN, since x * 0 is then also NaN. Nor is it valid
1970 when the mode has signed zeros, since multiplying a negative
1971 number by 0 will give -0, not 0. */
1972 if (!HONOR_NANS (mode)
1973 && !HONOR_SIGNED_ZEROS (mode)
1974 && trueop1 == CONST0_RTX (mode)
1975 && ! side_effects_p (op0))
1976 return op1;
1978 /* In IEEE floating point, x*1 is not equivalent to x for
1979 signalling NaNs. */
1980 if (!HONOR_SNANS (mode)
1981 && trueop1 == CONST1_RTX (mode))
1982 return op0;
1984 /* Convert multiply by constant power of two into shift unless
1985 we are still generating RTL. This test is a kludge. */
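      /* For example, with trueop1 == 8, exact_log2 returns 3 and
	 x * 8 is rewritten as (ashift x 3).  */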
1986 if (GET_CODE (trueop1) == CONST_INT
1987 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1988 /* If the mode is larger than the host word size, and the
1989 uppermost bit is set, then this isn't a power of two due
1990 to implicit sign extension. */
1991 && (width <= HOST_BITS_PER_WIDE_INT
1992 || val != HOST_BITS_PER_WIDE_INT - 1))
1993 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1995 /* Likewise for multipliers wider than a word. */
1996 if (GET_CODE (trueop1) == CONST_DOUBLE
1997 && (GET_MODE (trueop1) == VOIDmode
1998 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1999 && GET_MODE (op0) == mode
2000 && CONST_DOUBLE_LOW (trueop1) == 0
2001 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2002 return simplify_gen_binary (ASHIFT, mode, op0,
2003 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2005 /* x*2 is x+x and x*(-1) is -x */
2006 if (GET_CODE (trueop1) == CONST_DOUBLE
2007 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2008 && GET_MODE (op0) == mode)
2010 REAL_VALUE_TYPE d;
2011 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2013 if (REAL_VALUES_EQUAL (d, dconst2))
2014 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2016 if (!HONOR_SNANS (mode)
2017 && REAL_VALUES_EQUAL (d, dconstm1))
2018 return simplify_gen_unary (NEG, mode, op0, mode);
2021 /* Optimize -x * -x as x * x. */
2022 if (FLOAT_MODE_P (mode)
2023 && GET_CODE (op0) == NEG
2024 && GET_CODE (op1) == NEG
2025 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2026 && !side_effects_p (XEXP (op0, 0)))
2027 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2029 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2030 if (SCALAR_FLOAT_MODE_P (mode)
2031 && GET_CODE (op0) == ABS
2032 && GET_CODE (op1) == ABS
2033 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2034 && !side_effects_p (XEXP (op0, 0)))
2035 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2037 /* Reassociate multiplication, but for floating point MULTs
2038 only when the user specifies unsafe math optimizations. */
2039 if (! FLOAT_MODE_P (mode)
2040 || flag_unsafe_math_optimizations)
2042 tem = simplify_associative_operation (code, mode, op0, op1);
2043 if (tem)
2044 return tem;
2046 break;
2048 case IOR:
2049 if (trueop1 == const0_rtx)
2050 return op0;
2051 if (GET_CODE (trueop1) == CONST_INT
2052 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2053 == GET_MODE_MASK (mode)))
2054 return op1;
2055 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2056 return op0;
2057 /* A | (~A) -> -1 */
2058 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2059 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2060 && ! side_effects_p (op0)
2061 && SCALAR_INT_MODE_P (mode))
2062 return constm1_rtx;
2064 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2065 if (GET_CODE (op1) == CONST_INT
2066 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2067 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2068 return op1;
2070 /* Canonicalize (X & C1) | C2. */
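      /* E.g. with C1 == 0x0f and C2 == 0xff the first case below applies,
	 since (C1 & C2) == C1, and (X & 0x0f) | 0xff folds to the
	 constant 0xff.  */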
2071 if (GET_CODE (op0) == AND
2072 && GET_CODE (trueop1) == CONST_INT
2073 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2075 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2076 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2077 HOST_WIDE_INT c2 = INTVAL (trueop1);
2080 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2080 if ((c1 & c2) == c1
2081 && !side_effects_p (XEXP (op0, 0)))
2082 return trueop1;
2084 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2085 if (((c1|c2) & mask) == mask)
2086 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2088 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2089 if (((c1 & ~c2) & mask) != (c1 & mask))
2091 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2092 gen_int_mode (c1 & ~c2, mode));
2093 return simplify_gen_binary (IOR, mode, tem, op1);
2097 /* Convert (A & B) | A to A. */
2098 if (GET_CODE (op0) == AND
2099 && (rtx_equal_p (XEXP (op0, 0), op1)
2100 || rtx_equal_p (XEXP (op0, 1), op1))
2101 && ! side_effects_p (XEXP (op0, 0))
2102 && ! side_effects_p (XEXP (op0, 1)))
2103 return op1;
2105 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2106 mode size to (rotate A CX). */
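      /* E.g. in SImode, (ior (ashift A 24) (lshiftrt A 8)) satisfies
	 24 + 8 == 32 and becomes (rotate A 24).  */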
2108 if (GET_CODE (op1) == ASHIFT
2109 || GET_CODE (op1) == SUBREG)
2111 opleft = op1;
2112 opright = op0;
2114 else
2116 opright = op1;
2117 opleft = op0;
2120 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2121 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2122 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2123 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2124 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2125 == GET_MODE_BITSIZE (mode)))
2126 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2128 /* Same, but for ashift that has been "simplified" to a wider mode
2129 by simplify_shift_const. */
2131 if (GET_CODE (opleft) == SUBREG
2132 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2133 && GET_CODE (opright) == LSHIFTRT
2134 && GET_CODE (XEXP (opright, 0)) == SUBREG
2135 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2136 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2137 && (GET_MODE_SIZE (GET_MODE (opleft))
2138 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2139 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2140 SUBREG_REG (XEXP (opright, 0)))
2141 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2142 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2143 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2144 == GET_MODE_BITSIZE (mode)))
2145 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2146 XEXP (SUBREG_REG (opleft), 1));
2148 /* If we have (ior (and X C1) C2), simplify this by making
2149 C1 as small as possible if C1 actually changes. */
2150 if (GET_CODE (op1) == CONST_INT
2151 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2152 || INTVAL (op1) > 0)
2153 && GET_CODE (op0) == AND
2154 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2155 && GET_CODE (op1) == CONST_INT
2156 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2157 return simplify_gen_binary (IOR, mode,
2158 simplify_gen_binary
2159 (AND, mode, XEXP (op0, 0),
2160 GEN_INT (INTVAL (XEXP (op0, 1))
2161 & ~INTVAL (op1))),
2162 op1);
2164 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2165 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2166 the PLUS does not affect any of the bits in OP1: then we can do
2167 the IOR as a PLUS and we can associate. This is valid if OP1
2168 can be safely shifted left C bits. */
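      /* For example, (ior (ashiftrt (plus A N) 8) 1) has MASK == 1 << 8;
	 if nonzero_bits shows that bit of the PLUS is clear, the result
	 is (ashiftrt (plus A (N + 0x100)) 8).  */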
2169 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2170 && GET_CODE (XEXP (op0, 0)) == PLUS
2171 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2172 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2173 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2175 int count = INTVAL (XEXP (op0, 1));
2176 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2178 if (mask >> count == INTVAL (trueop1)
2179 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2180 return simplify_gen_binary (ASHIFTRT, mode,
2181 plus_constant (XEXP (op0, 0), mask),
2182 XEXP (op0, 1));
2185 tem = simplify_associative_operation (code, mode, op0, op1);
2186 if (tem)
2187 return tem;
2188 break;
2190 case XOR:
2191 if (trueop1 == const0_rtx)
2192 return op0;
2193 if (GET_CODE (trueop1) == CONST_INT
2194 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2195 == GET_MODE_MASK (mode)))
2196 return simplify_gen_unary (NOT, mode, op0, mode);
2197 if (rtx_equal_p (trueop0, trueop1)
2198 && ! side_effects_p (op0)
2199 && GET_MODE_CLASS (mode) != MODE_CC)
2200 return CONST0_RTX (mode);
2202 /* Canonicalize XOR of the most significant bit to PLUS. */
2203 if ((GET_CODE (op1) == CONST_INT
2204 || GET_CODE (op1) == CONST_DOUBLE)
2205 && mode_signbit_p (mode, op1))
2206 return simplify_gen_binary (PLUS, mode, op0, op1);
2207 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2208 if ((GET_CODE (op1) == CONST_INT
2209 || GET_CODE (op1) == CONST_DOUBLE)
2210 && GET_CODE (op0) == PLUS
2211 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2212 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2213 && mode_signbit_p (mode, XEXP (op0, 1)))
2214 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2215 simplify_gen_binary (XOR, mode, op1,
2216 XEXP (op0, 1)));
2218 /* If we are XORing two things that have no bits in common,
2219 convert them into an IOR. This helps to detect rotation encoded
2220 using those methods and possibly other simplifications. */
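      /* E.g. if nonzero_bits reports 0x0f for op0 and 0xf0 for op1, no
	 bits are shared and (xor op0 op1) is rewritten as
	 (ior op0 op1).  */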
2222 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2223 && (nonzero_bits (op0, mode)
2224 & nonzero_bits (op1, mode)) == 0)
2225 return (simplify_gen_binary (IOR, mode, op0, op1));
2227 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2228 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2229 (NOT y). */
2231 int num_negated = 0;
2233 if (GET_CODE (op0) == NOT)
2234 num_negated++, op0 = XEXP (op0, 0);
2235 if (GET_CODE (op1) == NOT)
2236 num_negated++, op1 = XEXP (op1, 0);
2238 if (num_negated == 2)
2239 return simplify_gen_binary (XOR, mode, op0, op1);
2240 else if (num_negated == 1)
2241 return simplify_gen_unary (NOT, mode,
2242 simplify_gen_binary (XOR, mode, op0, op1),
2243 mode);
2246 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2247 correspond to a machine insn or result in further simplifications
2248 if B is a constant. */
2250 if (GET_CODE (op0) == AND
2251 && rtx_equal_p (XEXP (op0, 1), op1)
2252 && ! side_effects_p (op1))
2253 return simplify_gen_binary (AND, mode,
2254 simplify_gen_unary (NOT, mode,
2255 XEXP (op0, 0), mode),
2256 op1);
2258 else if (GET_CODE (op0) == AND
2259 && rtx_equal_p (XEXP (op0, 0), op1)
2260 && ! side_effects_p (op1))
2261 return simplify_gen_binary (AND, mode,
2262 simplify_gen_unary (NOT, mode,
2263 XEXP (op0, 1), mode),
2264 op1);
2266 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2267 comparison if STORE_FLAG_VALUE is 1. */
2268 if (STORE_FLAG_VALUE == 1
2269 && trueop1 == const1_rtx
2270 && COMPARISON_P (op0)
2271 && (reversed = reversed_comparison (op0, mode)))
2272 return reversed;
2274 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2275 is (lt foo (const_int 0)), so we can perform the above
2276 simplification if STORE_FLAG_VALUE is 1. */
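      /* E.g. in SImode, (xor (lshiftrt x 31) 1) tests the complement of
	 the sign bit and becomes (ge x 0).  */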
2278 if (STORE_FLAG_VALUE == 1
2279 && trueop1 == const1_rtx
2280 && GET_CODE (op0) == LSHIFTRT
2281 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2282 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2283 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2285 /* (xor (comparison foo bar) (const_int sign-bit))
2286 when STORE_FLAG_VALUE is the sign bit. */
2287 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2288 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2289 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2290 && trueop1 == const_true_rtx
2291 && COMPARISON_P (op0)
2292 && (reversed = reversed_comparison (op0, mode)))
2293 return reversed;
2295 tem = simplify_associative_operation (code, mode, op0, op1);
2296 if (tem)
2297 return tem;
2298 break;
2300 case AND:
2301 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2302 return trueop1;
2303 /* If we are turning off bits already known off in OP0, we need
2304 not do an AND. */
2305 if (GET_CODE (trueop1) == CONST_INT
2306 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2307 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2308 return op0;
2309 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2310 && GET_MODE_CLASS (mode) != MODE_CC)
2311 return op0;
2312 /* A & (~A) -> 0 */
2313 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2314 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2315 && ! side_effects_p (op0)
2316 && GET_MODE_CLASS (mode) != MODE_CC)
2317 return CONST0_RTX (mode);
2319 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2320 there are no nonzero bits of C outside of X's mode. */
2321 if ((GET_CODE (op0) == SIGN_EXTEND
2322 || GET_CODE (op0) == ZERO_EXTEND)
2323 && GET_CODE (trueop1) == CONST_INT
2324 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2325 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2326 & INTVAL (trueop1)) == 0)
2328 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2329 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2330 gen_int_mode (INTVAL (trueop1),
2331 imode));
2332 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2335 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2336 if (GET_CODE (op0) == IOR
2337 && GET_CODE (trueop1) == CONST_INT
2338 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2340 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2341 return simplify_gen_binary (IOR, mode,
2342 simplify_gen_binary (AND, mode,
2343 XEXP (op0, 0), op1),
2344 gen_int_mode (tmp, mode));
2347 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2348 insn (and may simplify more). */
2349 if (GET_CODE (op0) == XOR
2350 && rtx_equal_p (XEXP (op0, 0), op1)
2351 && ! side_effects_p (op1))
2352 return simplify_gen_binary (AND, mode,
2353 simplify_gen_unary (NOT, mode,
2354 XEXP (op0, 1), mode),
2355 op1);
2357 if (GET_CODE (op0) == XOR
2358 && rtx_equal_p (XEXP (op0, 1), op1)
2359 && ! side_effects_p (op1))
2360 return simplify_gen_binary (AND, mode,
2361 simplify_gen_unary (NOT, mode,
2362 XEXP (op0, 0), mode),
2363 op1);
2365 /* Similarly for (~(A ^ B)) & A. */
2366 if (GET_CODE (op0) == NOT
2367 && GET_CODE (XEXP (op0, 0)) == XOR
2368 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2369 && ! side_effects_p (op1))
2370 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2372 if (GET_CODE (op0) == NOT
2373 && GET_CODE (XEXP (op0, 0)) == XOR
2374 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2375 && ! side_effects_p (op1))
2376 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2378 /* Convert (A | B) & A to A. */
2379 if (GET_CODE (op0) == IOR
2380 && (rtx_equal_p (XEXP (op0, 0), op1)
2381 || rtx_equal_p (XEXP (op0, 1), op1))
2382 && ! side_effects_p (XEXP (op0, 0))
2383 && ! side_effects_p (XEXP (op0, 1)))
2384 return op1;
2386 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2387 ((A & N) + B) & M -> (A + B) & M
2388 Similarly if (N & M) == 0,
2389 ((A | N) + B) & M -> (A + B) & M
2390 and for - instead of + and/or ^ instead of |. */
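      /* For example, with M == 0xff and N == 0x1ff,
	 ((A & 0x1ff) + B) & 0xff becomes (A + B) & 0xff, because the low
	 eight bits of (A & 0x1ff) and of A agree.  */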
2391 if (GET_CODE (trueop1) == CONST_INT
2392 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2393 && ~INTVAL (trueop1)
2394 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2395 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2397 rtx pmop[2];
2398 int which;
2400 pmop[0] = XEXP (op0, 0);
2401 pmop[1] = XEXP (op0, 1);
2403 for (which = 0; which < 2; which++)
2405 tem = pmop[which];
2406 switch (GET_CODE (tem))
2408 case AND:
2409 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2410 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2411 == INTVAL (trueop1))
2412 pmop[which] = XEXP (tem, 0);
2413 break;
2414 case IOR:
2415 case XOR:
2416 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2417 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2418 pmop[which] = XEXP (tem, 0);
2419 break;
2420 default:
2421 break;
2425 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2427 tem = simplify_gen_binary (GET_CODE (op0), mode,
2428 pmop[0], pmop[1]);
2429 return simplify_gen_binary (code, mode, tem, op1);
2433 /* (and X (ior (not X) Y)) -> (and X Y) */
2434 if (GET_CODE (op1) == IOR
2435 && GET_CODE (XEXP (op1, 0)) == NOT
2436 && op0 == XEXP (XEXP (op1, 0), 0))
2437 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2439 /* (and (ior (not X) Y) X) -> (and X Y) */
2440 if (GET_CODE (op0) == IOR
2441 && GET_CODE (XEXP (op0, 0)) == NOT
2442 && op1 == XEXP (XEXP (op0, 0), 0))
2443 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2445 tem = simplify_associative_operation (code, mode, op0, op1);
2446 if (tem)
2447 return tem;
2448 break;
2450 case UDIV:
2451 /* 0/x is 0 (or x&0 if x has side-effects). */
2452 if (trueop0 == CONST0_RTX (mode))
2454 if (side_effects_p (op1))
2455 return simplify_gen_binary (AND, mode, op1, trueop0);
2456 return trueop0;
2458 /* x/1 is x. */
2459 if (trueop1 == CONST1_RTX (mode))
2460 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2461 /* Convert divide by power of two into shift. */
2462 if (GET_CODE (trueop1) == CONST_INT
2463 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2464 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2465 break;
2467 case DIV:
2468 /* Handle floating point and integers separately. */
2469 if (SCALAR_FLOAT_MODE_P (mode))
2471 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2472 safe for modes with NaNs, since 0.0 / 0.0 will then be
2473 NaN rather than 0.0. Nor is it safe for modes with signed
2474 zeros, since dividing 0 by a negative number gives -0.0. */
2475 if (trueop0 == CONST0_RTX (mode)
2476 && !HONOR_NANS (mode)
2477 && !HONOR_SIGNED_ZEROS (mode)
2478 && ! side_effects_p (op1))
2479 return op0;
2480 /* x/1.0 is x. */
2481 if (trueop1 == CONST1_RTX (mode)
2482 && !HONOR_SNANS (mode))
2483 return op0;
2485 if (GET_CODE (trueop1) == CONST_DOUBLE
2486 && trueop1 != CONST0_RTX (mode))
2488 REAL_VALUE_TYPE d;
2489 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2491 /* x/-1.0 is -x. */
2492 if (REAL_VALUES_EQUAL (d, dconstm1)
2493 && !HONOR_SNANS (mode))
2494 return simplify_gen_unary (NEG, mode, op0, mode);
2496 /* Change FP division by a constant into multiplication.
2497 Only do this with -freciprocal-math. */
2498 if (flag_reciprocal_math
2499 && !REAL_VALUES_EQUAL (d, dconst0))
2501 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2502 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2503 return simplify_gen_binary (MULT, mode, op0, tem);
2507 else
2509 /* 0/x is 0 (or x&0 if x has side-effects). */
2510 if (trueop0 == CONST0_RTX (mode))
2512 if (side_effects_p (op1))
2513 return simplify_gen_binary (AND, mode, op1, trueop0);
2514 return trueop0;
2516 /* x/1 is x. */
2517 if (trueop1 == CONST1_RTX (mode))
2518 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2519 /* x/-1 is -x. */
2520 if (trueop1 == constm1_rtx)
2522 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2523 return simplify_gen_unary (NEG, mode, x, mode);
2526 break;
2528 case UMOD:
2529 /* 0%x is 0 (or x&0 if x has side-effects). */
2530 if (trueop0 == CONST0_RTX (mode))
2532 if (side_effects_p (op1))
2533 return simplify_gen_binary (AND, mode, op1, trueop0);
2534 return trueop0;
2536 /* x%1 is 0 (or x&0 if x has side-effects). */
2537 if (trueop1 == CONST1_RTX (mode))
2539 if (side_effects_p (op0))
2540 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2541 return CONST0_RTX (mode);
2543 /* Implement modulus by power of two as AND. */
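      /* For example, (umod x 8) becomes (and x 7).  */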
2544 if (GET_CODE (trueop1) == CONST_INT
2545 && exact_log2 (INTVAL (trueop1)) > 0)
2546 return simplify_gen_binary (AND, mode, op0,
2547 GEN_INT (INTVAL (op1) - 1));
2548 break;
2550 case MOD:
2551 /* 0%x is 0 (or x&0 if x has side-effects). */
2552 if (trueop0 == CONST0_RTX (mode))
2554 if (side_effects_p (op1))
2555 return simplify_gen_binary (AND, mode, op1, trueop0);
2556 return trueop0;
2558 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2559 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2561 if (side_effects_p (op0))
2562 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2563 return CONST0_RTX (mode);
2565 break;
2567 case ROTATERT:
2568 case ROTATE:
2569 case ASHIFTRT:
2570 if (trueop1 == CONST0_RTX (mode))
2571 return op0;
2572 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2573 return op0;
2574 /* Rotating ~0 always results in ~0. */
2575 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2576 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2577 && ! side_effects_p (op1))
2578 return op0;
2579 canonicalize_shift:
2580 if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
2582 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2583 if (val != INTVAL (op1))
2584 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2586 break;
2588 case ASHIFT:
2589 case SS_ASHIFT:
2590 case US_ASHIFT:
2591 if (trueop1 == CONST0_RTX (mode))
2592 return op0;
2593 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2594 return op0;
2595 goto canonicalize_shift;
2597 case LSHIFTRT:
2598 if (trueop1 == CONST0_RTX (mode))
2599 return op0;
2600 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2601 return op0;
2602 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2603 if (GET_CODE (op0) == CLZ
2604 && GET_CODE (trueop1) == CONST_INT
2605 && STORE_FLAG_VALUE == 1
2606 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2608 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2609 unsigned HOST_WIDE_INT zero_val = 0;
2611 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2612 && zero_val == GET_MODE_BITSIZE (imode)
2613 && INTVAL (trueop1) == exact_log2 (zero_val))
2614 return simplify_gen_relational (EQ, mode, imode,
2615 XEXP (op0, 0), const0_rtx);
2617 goto canonicalize_shift;
2619 case SMIN:
2620 if (width <= HOST_BITS_PER_WIDE_INT
2621 && GET_CODE (trueop1) == CONST_INT
2622 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2623 && ! side_effects_p (op0))
2624 return op1;
2625 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2626 return op0;
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 break;
2632 case SMAX:
2633 if (width <= HOST_BITS_PER_WIDE_INT
2634 && GET_CODE (trueop1) == CONST_INT
2635 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2636 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2637 && ! side_effects_p (op0))
2638 return op1;
2639 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2640 return op0;
2641 tem = simplify_associative_operation (code, mode, op0, op1);
2642 if (tem)
2643 return tem;
2644 break;
2646 case UMIN:
2647 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2648 return op1;
2649 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2650 return op0;
2651 tem = simplify_associative_operation (code, mode, op0, op1);
2652 if (tem)
2653 return tem;
2654 break;
2656 case UMAX:
2657 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2658 return op1;
2659 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2660 return op0;
2661 tem = simplify_associative_operation (code, mode, op0, op1);
2662 if (tem)
2663 return tem;
2664 break;
2666 case SS_PLUS:
2667 case US_PLUS:
2668 case SS_MINUS:
2669 case US_MINUS:
2670 case SS_MULT:
2671 case US_MULT:
2672 case SS_DIV:
2673 case US_DIV:
2674 /* ??? There are simplifications that can be done. */
2675 return 0;
2677 case VEC_SELECT:
2678 if (!VECTOR_MODE_P (mode))
2680 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2681 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2682 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2683 gcc_assert (XVECLEN (trueop1, 0) == 1);
2684 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2686 if (GET_CODE (trueop0) == CONST_VECTOR)
2687 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2688 (trueop1, 0, 0)));
2690 /* Extract a scalar element from a nested VEC_SELECT expression
2691 (with optional nested VEC_CONCAT expression). Some targets
2692 (i386) extract a scalar element from a vector using a chain of
2693 nested VEC_SELECT expressions. When the input operand is a memory
2694 operand, this operation can be simplified to a simple scalar
2695 load from a suitably offset memory address. */
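      /* As an illustration, (vec_select:SF (vec_select:V2SF X
	 (parallel [2 3])) (parallel [1])), with X a V4SF operand, picks
	 element 3 of X and is rebuilt below as
	 (vec_select:SF X (parallel [3])).  */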
2696 if (GET_CODE (trueop0) == VEC_SELECT)
2698 rtx op0 = XEXP (trueop0, 0);
2699 rtx op1 = XEXP (trueop0, 1);
2701 enum machine_mode opmode = GET_MODE (op0);
2702 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2703 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2705 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2706 int elem;
2708 rtvec vec;
2709 rtx tmp_op, tmp;
2711 gcc_assert (GET_CODE (op1) == PARALLEL);
2712 gcc_assert (i < n_elts);
2714 /* Select the element pointed to by the nested selector. */
2715 elem = INTVAL (XVECEXP (op1, 0, i));
2717 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2718 if (GET_CODE (op0) == VEC_CONCAT)
2720 rtx op00 = XEXP (op0, 0);
2721 rtx op01 = XEXP (op0, 1);
2723 enum machine_mode mode00, mode01;
2724 int n_elts00, n_elts01;
2726 mode00 = GET_MODE (op00);
2727 mode01 = GET_MODE (op01);
2729 /* Find out number of elements of each operand. */
2730 if (VECTOR_MODE_P (mode00))
2732 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2733 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2735 else
2736 n_elts00 = 1;
2738 if (VECTOR_MODE_P (mode01))
2740 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2741 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2743 else
2744 n_elts01 = 1;
2746 gcc_assert (n_elts == n_elts00 + n_elts01);
2748 /* Select correct operand of VEC_CONCAT
2749 and adjust selector. */
2750 if (elem < n_elts01)
2751 tmp_op = op00;
2752 else
2754 tmp_op = op01;
2755 elem -= n_elts00;
2758 else
2759 tmp_op = op0;
2761 vec = rtvec_alloc (1);
2762 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2764 tmp = gen_rtx_fmt_ee (code, mode,
2765 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2766 return tmp;
2769 else
2771 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2772 gcc_assert (GET_MODE_INNER (mode)
2773 == GET_MODE_INNER (GET_MODE (trueop0)));
2774 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2776 if (GET_CODE (trueop0) == CONST_VECTOR)
2778 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2779 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2780 rtvec v = rtvec_alloc (n_elts);
2781 unsigned int i;
2783 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2784 for (i = 0; i < n_elts; i++)
2786 rtx x = XVECEXP (trueop1, 0, i);
2788 gcc_assert (GET_CODE (x) == CONST_INT);
2789 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2790 INTVAL (x));
2793 return gen_rtx_CONST_VECTOR (mode, v);
2797 if (XVECLEN (trueop1, 0) == 1
2798 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2799 && GET_CODE (trueop0) == VEC_CONCAT)
2801 rtx vec = trueop0;
2802 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2804 /* Try to find the element in the VEC_CONCAT. */
2805 while (GET_MODE (vec) != mode
2806 && GET_CODE (vec) == VEC_CONCAT)
2808 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2809 if (offset < vec_size)
2810 vec = XEXP (vec, 0);
2811 else
2813 offset -= vec_size;
2814 vec = XEXP (vec, 1);
2816 vec = avoid_constant_pool_reference (vec);
2819 if (GET_MODE (vec) == mode)
2820 return vec;
2823 return 0;
2824 case VEC_CONCAT:
2826 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2827 ? GET_MODE (trueop0)
2828 : GET_MODE_INNER (mode));
2829 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2830 ? GET_MODE (trueop1)
2831 : GET_MODE_INNER (mode));
2833 gcc_assert (VECTOR_MODE_P (mode));
2834 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2835 == GET_MODE_SIZE (mode));
2837 if (VECTOR_MODE_P (op0_mode))
2838 gcc_assert (GET_MODE_INNER (mode)
2839 == GET_MODE_INNER (op0_mode));
2840 else
2841 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2843 if (VECTOR_MODE_P (op1_mode))
2844 gcc_assert (GET_MODE_INNER (mode)
2845 == GET_MODE_INNER (op1_mode));
2846 else
2847 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2849 if ((GET_CODE (trueop0) == CONST_VECTOR
2850 || GET_CODE (trueop0) == CONST_INT
2851 || GET_CODE (trueop0) == CONST_DOUBLE)
2852 && (GET_CODE (trueop1) == CONST_VECTOR
2853 || GET_CODE (trueop1) == CONST_INT
2854 || GET_CODE (trueop1) == CONST_DOUBLE))
2856 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2857 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2858 rtvec v = rtvec_alloc (n_elts);
2859 unsigned int i;
2860 unsigned in_n_elts = 1;
2862 if (VECTOR_MODE_P (op0_mode))
2863 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2864 for (i = 0; i < n_elts; i++)
2866 if (i < in_n_elts)
2868 if (!VECTOR_MODE_P (op0_mode))
2869 RTVEC_ELT (v, i) = trueop0;
2870 else
2871 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2873 else
2875 if (!VECTOR_MODE_P (op1_mode))
2876 RTVEC_ELT (v, i) = trueop1;
2877 else
2878 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2879 i - in_n_elts);
2883 return gen_rtx_CONST_VECTOR (mode, v);
2886 return 0;
2888 default:
2889 gcc_unreachable ();
2892 return 0;
2896 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2897 rtx op0, rtx op1)
2899 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2900 HOST_WIDE_INT val;
2901 unsigned int width = GET_MODE_BITSIZE (mode);
2903 if (VECTOR_MODE_P (mode)
2904 && code != VEC_CONCAT
2905 && GET_CODE (op0) == CONST_VECTOR
2906 && GET_CODE (op1) == CONST_VECTOR)
2908 unsigned n_elts = GET_MODE_NUNITS (mode);
2909 enum machine_mode op0mode = GET_MODE (op0);
2910 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2911 enum machine_mode op1mode = GET_MODE (op1);
2912 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2913 rtvec v = rtvec_alloc (n_elts);
2914 unsigned int i;
2916 gcc_assert (op0_n_elts == n_elts);
2917 gcc_assert (op1_n_elts == n_elts);
2918 for (i = 0; i < n_elts; i++)
2920 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2921 CONST_VECTOR_ELT (op0, i),
2922 CONST_VECTOR_ELT (op1, i));
2923 if (!x)
2924 return 0;
2925 RTVEC_ELT (v, i) = x;
2928 return gen_rtx_CONST_VECTOR (mode, v);
2931 if (VECTOR_MODE_P (mode)
2932 && code == VEC_CONCAT
2933 && (CONST_INT_P (op0)
2934 || GET_CODE (op0) == CONST_DOUBLE
2935 || GET_CODE (op0) == CONST_FIXED)
2936 && (CONST_INT_P (op1)
2937 || GET_CODE (op1) == CONST_DOUBLE
2938 || GET_CODE (op1) == CONST_FIXED))
2940 unsigned n_elts = GET_MODE_NUNITS (mode);
2941 rtvec v = rtvec_alloc (n_elts);
2943 gcc_assert (n_elts >= 2);
2944 if (n_elts == 2)
2946 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2947 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2949 RTVEC_ELT (v, 0) = op0;
2950 RTVEC_ELT (v, 1) = op1;
2952 else
2954 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2955 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2956 unsigned i;
2958 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2959 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2960 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2962 for (i = 0; i < op0_n_elts; ++i)
2963 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2964 for (i = 0; i < op1_n_elts; ++i)
2965 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2968 return gen_rtx_CONST_VECTOR (mode, v);
2971 if (SCALAR_FLOAT_MODE_P (mode)
2972 && GET_CODE (op0) == CONST_DOUBLE
2973 && GET_CODE (op1) == CONST_DOUBLE
2974 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2976 if (code == AND
2977 || code == IOR
2978 || code == XOR)
2980 long tmp0[4];
2981 long tmp1[4];
2982 REAL_VALUE_TYPE r;
2983 int i;
2985 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2986 GET_MODE (op0));
2987 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2988 GET_MODE (op1));
2989 for (i = 0; i < 4; i++)
2991 switch (code)
2993 case AND:
2994 tmp0[i] &= tmp1[i];
2995 break;
2996 case IOR:
2997 tmp0[i] |= tmp1[i];
2998 break;
2999 case XOR:
3000 tmp0[i] ^= tmp1[i];
3001 break;
3002 default:
3003 gcc_unreachable ();
3006 real_from_target (&r, tmp0, mode);
3007 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3009 else
3011 REAL_VALUE_TYPE f0, f1, value, result;
3012 bool inexact;
3014 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3015 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3016 real_convert (&f0, mode, &f0);
3017 real_convert (&f1, mode, &f1);
3019 if (HONOR_SNANS (mode)
3020 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3021 return 0;
3023 if (code == DIV
3024 && REAL_VALUES_EQUAL (f1, dconst0)
3025 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3026 return 0;
3028 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3029 && flag_trapping_math
3030 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3032 int s0 = REAL_VALUE_NEGATIVE (f0);
3033 int s1 = REAL_VALUE_NEGATIVE (f1);
3035 switch (code)
3037 case PLUS:
3038 /* Inf + -Inf = NaN plus exception. */
3039 if (s0 != s1)
3040 return 0;
3041 break;
3042 case MINUS:
3043 /* Inf - Inf = NaN plus exception. */
3044 if (s0 == s1)
3045 return 0;
3046 break;
3047 case DIV:
3048 /* Inf / Inf = NaN plus exception. */
3049 return 0;
3050 default:
3051 break;
3055 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3056 && flag_trapping_math
3057 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3058 || (REAL_VALUE_ISINF (f1)
3059 && REAL_VALUES_EQUAL (f0, dconst0))))
3060 /* Inf * 0 = NaN plus exception. */
3061 return 0;
3063 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3064 &f0, &f1);
3065 real_convert (&result, mode, &value);
3067 /* Don't constant fold this floating point operation if
3068 the result has overflowed and flag_trapping_math is set. */
3070 if (flag_trapping_math
3071 && MODE_HAS_INFINITIES (mode)
3072 && REAL_VALUE_ISINF (result)
3073 && !REAL_VALUE_ISINF (f0)
3074 && !REAL_VALUE_ISINF (f1))
3075 /* Overflow plus exception. */
3076 return 0;
3078 /* Don't constant fold this floating point operation if the
3079 result may depend upon the run-time rounding mode and
3080 flag_rounding_math is set, or if GCC's software emulation
3081 is unable to accurately represent the result. */
3083 if ((flag_rounding_math
3084 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3085 && (inexact || !real_identical (&result, &value)))
3086 return NULL_RTX;
3088 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3092 /* We can fold some multi-word operations. */
3093 if (GET_MODE_CLASS (mode) == MODE_INT
3094 && width == HOST_BITS_PER_WIDE_INT * 2
3095 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3096 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3098 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3099 HOST_WIDE_INT h1, h2, hv, ht;
3101 if (GET_CODE (op0) == CONST_DOUBLE)
3102 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3103 else
3104 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3106 if (GET_CODE (op1) == CONST_DOUBLE)
3107 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3108 else
3109 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3111 switch (code)
3113 case MINUS:
3114 /* A - B == A + (-B). */
3115 neg_double (l2, h2, &lv, &hv);
3116 l2 = lv, h2 = hv;
3118 /* Fall through.... */
3120 case PLUS:
3121 add_double (l1, h1, l2, h2, &lv, &hv);
3122 break;
3124 case MULT:
3125 mul_double (l1, h1, l2, h2, &lv, &hv);
3126 break;
3128 case DIV:
3129 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3130 &lv, &hv, &lt, &ht))
3131 return 0;
3132 break;
3134 case MOD:
3135 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3136 &lt, &ht, &lv, &hv))
3137 return 0;
3138 break;
3140 case UDIV:
3141 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3142 &lv, &hv, &lt, &ht))
3143 return 0;
3144 break;
3146 case UMOD:
3147 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3148 &lt, &ht, &lv, &hv))
3149 return 0;
3150 break;
3152 case AND:
3153 lv = l1 & l2, hv = h1 & h2;
3154 break;
3156 case IOR:
3157 lv = l1 | l2, hv = h1 | h2;
3158 break;
3160 case XOR:
3161 lv = l1 ^ l2, hv = h1 ^ h2;
3162 break;
3164 case SMIN:
3165 if (h1 < h2
3166 || (h1 == h2
3167 && ((unsigned HOST_WIDE_INT) l1
3168 < (unsigned HOST_WIDE_INT) l2)))
3169 lv = l1, hv = h1;
3170 else
3171 lv = l2, hv = h2;
3172 break;
3174 case SMAX:
3175 if (h1 > h2
3176 || (h1 == h2
3177 && ((unsigned HOST_WIDE_INT) l1
3178 > (unsigned HOST_WIDE_INT) l2)))
3179 lv = l1, hv = h1;
3180 else
3181 lv = l2, hv = h2;
3182 break;
3184 case UMIN:
3185 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3186 || (h1 == h2
3187 && ((unsigned HOST_WIDE_INT) l1
3188 < (unsigned HOST_WIDE_INT) l2)))
3189 lv = l1, hv = h1;
3190 else
3191 lv = l2, hv = h2;
3192 break;
3194 case UMAX:
3195 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3196 || (h1 == h2
3197 && ((unsigned HOST_WIDE_INT) l1
3198 > (unsigned HOST_WIDE_INT) l2)))
3199 lv = l1, hv = h1;
3200 else
3201 lv = l2, hv = h2;
3202 break;
3204 case LSHIFTRT: case ASHIFTRT:
3205 case ASHIFT:
3206 case ROTATE: case ROTATERT:
3207 if (SHIFT_COUNT_TRUNCATED)
3208 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3210 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3211 return 0;
3213 if (code == LSHIFTRT || code == ASHIFTRT)
3214 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3215 code == ASHIFTRT);
3216 else if (code == ASHIFT)
3217 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3218 else if (code == ROTATE)
3219 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3220 else /* code == ROTATERT */
3221 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3222 break;
3224 default:
3225 return 0;
3228 return immed_double_const (lv, hv, mode);
3231 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3232 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3234 /* Get the integer argument values in two forms:
3235 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3237 arg0 = INTVAL (op0);
3238 arg1 = INTVAL (op1);
3240 if (width < HOST_BITS_PER_WIDE_INT)
3242 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3243 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3245 arg0s = arg0;
3246 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3247 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3249 arg1s = arg1;
3250 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3251 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3253 else
3255 arg0s = arg0;
3256 arg1s = arg1;
3259 /* Compute the value of the arithmetic. */
3261 switch (code)
3263 case PLUS:
3264 val = arg0s + arg1s;
3265 break;
3267 case MINUS:
3268 val = arg0s - arg1s;
3269 break;
3271 case MULT:
3272 val = arg0s * arg1s;
3273 break;
3275 case DIV:
3276 if (arg1s == 0
3277 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3278 && arg1s == -1))
3279 return 0;
3280 val = arg0s / arg1s;
3281 break;
3283 case MOD:
3284 if (arg1s == 0
3285 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3286 && arg1s == -1))
3287 return 0;
3288 val = arg0s % arg1s;
3289 break;
3291 case UDIV:
3292 if (arg1 == 0
3293 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3294 && arg1s == -1))
3295 return 0;
3296 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3297 break;
3299 case UMOD:
3300 if (arg1 == 0
3301 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3302 && arg1s == -1))
3303 return 0;
3304 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3305 break;
3307 case AND:
3308 val = arg0 & arg1;
3309 break;
3311 case IOR:
3312 val = arg0 | arg1;
3313 break;
3315 case XOR:
3316 val = arg0 ^ arg1;
3317 break;
3319 case LSHIFTRT:
3320 case ASHIFT:
3321 case ASHIFTRT:
3322 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3323 the value is in range. We can't return any old value for
3324 out-of-range arguments because either the middle-end (via
3325 shift_truncation_mask) or the back-end might be relying on
3326 target-specific knowledge. Nor can we rely on
3327 shift_truncation_mask, since the shift might not be part of an
3328 ashlM3, lshrM3 or ashrM3 instruction. */
3329 if (SHIFT_COUNT_TRUNCATED)
3330 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3331 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3332 return 0;
3334 val = (code == ASHIFT
3335 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3336 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3338 /* Sign-extend the result for arithmetic right shifts. */
3339 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3340 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3341 break;
3343 case ROTATERT:
3344 if (arg1 < 0)
3345 return 0;
3347 arg1 %= width;
3348 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3349 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3350 break;
3352 case ROTATE:
3353 if (arg1 < 0)
3354 return 0;
3356 arg1 %= width;
3357 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3358 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3359 break;
3361 case COMPARE:
3362 /* Do nothing here. */
3363 return 0;
3365 case SMIN:
3366 val = arg0s <= arg1s ? arg0s : arg1s;
3367 break;
3369 case UMIN:
3370 val = ((unsigned HOST_WIDE_INT) arg0
3371 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3372 break;
3374 case SMAX:
3375 val = arg0s > arg1s ? arg0s : arg1s;
3376 break;
3378 case UMAX:
3379 val = ((unsigned HOST_WIDE_INT) arg0
3380 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3381 break;
3383 case SS_PLUS:
3384 case US_PLUS:
3385 case SS_MINUS:
3386 case US_MINUS:
3387 case SS_MULT:
3388 case US_MULT:
3389 case SS_DIV:
3390 case US_DIV:
3391 case SS_ASHIFT:
3392 case US_ASHIFT:
3393 /* ??? There are simplifications that can be done. */
3394 return 0;
3396 default:
3397 gcc_unreachable ();
3400 return gen_int_mode (val, mode);
3403 return NULL_RTX;
3408 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3409 PLUS or MINUS.
3411 Rather than test for specific cases, we do this by a brute-force method
3412 and do all possible simplifications until no more changes occur. Then
3413 we rebuild the operation. */
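/* As an illustration, (minus A (plus B C)) is flattened into the signed
   operand list {+A, -B, -C}, simplified pairwise where possible, and
   rebuilt as a chain such as (minus (minus A B) C).  */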
3415 struct simplify_plus_minus_op_data
3417 rtx op;
3418 short neg;
3421 static bool
3422 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3424 int result;
3426 result = (commutative_operand_precedence (y)
3427 - commutative_operand_precedence (x));
3428 if (result)
3429 return result > 0;
3431 /* Group together equal REGs to do more simplification. */
3432 if (REG_P (x) && REG_P (y))
3433 return REGNO (x) > REGNO (y);
3434 else
3435 return false;
3438 static rtx
3439 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3440 rtx op1)
3442 struct simplify_plus_minus_op_data ops[8];
3443 rtx result, tem;
3444 int n_ops = 2, input_ops = 2;
3445 int changed, n_constants = 0, canonicalized = 0;
3446 int i, j;
3448 memset (ops, 0, sizeof ops);
3450 /* Set up the two operands and then expand them until nothing has been
3451 changed. If we run out of room in our array, give up; this should
3452 almost never happen. */
3454 ops[0].op = op0;
3455 ops[0].neg = 0;
3456 ops[1].op = op1;
3457 ops[1].neg = (code == MINUS);
3461 changed = 0;
3463 for (i = 0; i < n_ops; i++)
3465 rtx this_op = ops[i].op;
3466 int this_neg = ops[i].neg;
3467 enum rtx_code this_code = GET_CODE (this_op);
3469 switch (this_code)
3471 case PLUS:
3472 case MINUS:
3473 if (n_ops == 7)
3474 return NULL_RTX;
3476 ops[n_ops].op = XEXP (this_op, 1);
3477 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3478 n_ops++;
3480 ops[i].op = XEXP (this_op, 0);
3481 input_ops++;
3482 changed = 1;
3483 canonicalized |= this_neg;
3484 break;
3486 case NEG:
3487 ops[i].op = XEXP (this_op, 0);
3488 ops[i].neg = ! this_neg;
3489 changed = 1;
3490 canonicalized = 1;
3491 break;
3493 case CONST:
3494 if (n_ops < 7
3495 && GET_CODE (XEXP (this_op, 0)) == PLUS
3496 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3497 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3499 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3500 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3501 ops[n_ops].neg = this_neg;
3502 n_ops++;
3503 changed = 1;
3504 canonicalized = 1;
3506 break;
3508 case NOT:
3509 /* ~a -> (-a - 1) */
3510 if (n_ops != 7)
3512 ops[n_ops].op = constm1_rtx;
3513 ops[n_ops++].neg = this_neg;
3514 ops[i].op = XEXP (this_op, 0);
3515 ops[i].neg = !this_neg;
3516 changed = 1;
3517 canonicalized = 1;
3519 break;
3521 case CONST_INT:
3522 n_constants++;
3523 if (this_neg)
3525 ops[i].op = neg_const_int (mode, this_op);
3526 ops[i].neg = 0;
3527 changed = 1;
3528 canonicalized = 1;
3530 break;
3532 default:
3533 break;
3537 while (changed);
3539 if (n_constants > 1)
3540 canonicalized = 1;
3542 gcc_assert (n_ops >= 2);
3544 /* If we only have two operands, we can avoid the loops. */
3545 if (n_ops == 2)
3547 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3548 rtx lhs, rhs;
3550 /* Get the two operands. Be careful with the order, especially for
3551 the cases where code == MINUS. */
3552 if (ops[0].neg && ops[1].neg)
3554 lhs = gen_rtx_NEG (mode, ops[0].op);
3555 rhs = ops[1].op;
3557 else if (ops[0].neg)
3559 lhs = ops[1].op;
3560 rhs = ops[0].op;
3562 else
3564 lhs = ops[0].op;
3565 rhs = ops[1].op;
3568 return simplify_const_binary_operation (code, mode, lhs, rhs);
3571 /* Now simplify each pair of operands until nothing changes. */
3574 /* Insertion sort is good enough for an eight-element array. */
3575 for (i = 1; i < n_ops; i++)
3577 struct simplify_plus_minus_op_data save;
3578 j = i - 1;
3579 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3580 continue;
3582 canonicalized = 1;
3583 save = ops[i];
3585 ops[j + 1] = ops[j];
3586 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3587 ops[j + 1] = save;
3590 /* This is only useful the first time through. */
3591 if (!canonicalized)
3592 return NULL_RTX;
3594 changed = 0;
3595 for (i = n_ops - 1; i > 0; i--)
3596 for (j = i - 1; j >= 0; j--)
3598 rtx lhs = ops[j].op, rhs = ops[i].op;
3599 int lneg = ops[j].neg, rneg = ops[i].neg;
3601 if (lhs != 0 && rhs != 0)
3603 enum rtx_code ncode = PLUS;
3605 if (lneg != rneg)
3607 ncode = MINUS;
3608 if (lneg)
3609 tem = lhs, lhs = rhs, rhs = tem;
3611 else if (swap_commutative_operands_p (lhs, rhs))
3612 tem = lhs, lhs = rhs, rhs = tem;
3614 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3615 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3617 rtx tem_lhs, tem_rhs;
3619 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3620 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3621 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3623 if (tem && !CONSTANT_P (tem))
3624 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3626 else
3627 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3629 /* Reject "simplifications" that just wrap the two
3630 arguments in a CONST. Failure to do so can result
3631 in infinite recursion with simplify_binary_operation
3632 when it calls us to simplify CONST operations. */
3633 if (tem
3634 && ! (GET_CODE (tem) == CONST
3635 && GET_CODE (XEXP (tem, 0)) == ncode
3636 && XEXP (XEXP (tem, 0), 0) == lhs
3637 && XEXP (XEXP (tem, 0), 1) == rhs))
3639 lneg &= rneg;
3640 if (GET_CODE (tem) == NEG)
3641 tem = XEXP (tem, 0), lneg = !lneg;
3642 if (GET_CODE (tem) == CONST_INT && lneg)
3643 tem = neg_const_int (mode, tem), lneg = 0;
3645 ops[i].op = tem;
3646 ops[i].neg = lneg;
3647 ops[j].op = NULL_RTX;
3648 changed = 1;
3653 /* Pack all the operands to the lower-numbered entries. */
3654 for (i = 0, j = 0; j < n_ops; j++)
3655 if (ops[j].op)
3657 ops[i] = ops[j];
3658 i++;
3660 n_ops = i;
3662 while (changed);
3664 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3665 if (n_ops == 2
3666 && GET_CODE (ops[1].op) == CONST_INT
3667 && CONSTANT_P (ops[0].op)
3668 && ops[0].neg)
3669 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3671 /* We suppressed creation of trivial CONST expressions in the
3672 combination loop to avoid recursion. Create one manually now.
3673 The combination loop should have ensured that there is exactly
3674 one CONST_INT, and the sort will have ensured that it is last
3675 in the array and that any other constant will be next-to-last. */
3677 if (GET_CODE (ops[n_ops - 1].op) == CONST_INT)
3678 i = n_ops - 2;
3679 else
3680 i = n_ops - 1;
3682 if (i >= 1
3683 && ops[i].neg
3684 && !ops[i - 1].neg
3685 && CONSTANT_P (ops[i].op)
3686 && GET_CODE (ops[i].op) == GET_CODE (ops[i - 1].op))
3688 ops[i - 1].op = gen_rtx_MINUS (mode, ops[i - 1].op, ops[i].op);
3689 ops[i - 1].op = gen_rtx_CONST (mode, ops[i - 1].op);
3690 if (i < n_ops - 1)
3691 ops[i] = ops[i + 1];
3692 n_ops--;
3695 if (n_ops > 1
3696 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3697 && CONSTANT_P (ops[n_ops - 2].op))
3699 rtx value = ops[n_ops - 1].op;
3700 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3701 value = neg_const_int (mode, value);
3702 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3703 n_ops--;
3706 /* Put a non-negated operand first, if possible. */
3708 for (i = 0; i < n_ops && ops[i].neg; i++)
3709 continue;
3710 if (i == n_ops)
3711 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3712 else if (i != 0)
3714 tem = ops[0].op;
3715 ops[0] = ops[i];
3716 ops[i].op = tem;
3717 ops[i].neg = 1;
3720 /* Now make the result by performing the requested operations. */
3721 result = ops[0].op;
3722 for (i = 1; i < n_ops; i++)
3723 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3724 mode, result, ops[i].op);
3726 return result;
3729 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3730 static bool
3731 plus_minus_operand_p (const_rtx x)
3733 return GET_CODE (x) == PLUS
3734 || GET_CODE (x) == MINUS
3735 || (GET_CODE (x) == CONST
3736 && GET_CODE (XEXP (x, 0)) == PLUS
3737 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3738 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3741 /* Like simplify_binary_operation except used for relational operators.
3742 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3743 not both be VOIDmode as well.
3745 CMP_MODE specifies the mode in which the comparison is done, so it is
3746 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3747 the operands or, if both are VOIDmode, the operands are compared in
3748 "infinite precision". */
3750 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3751 enum machine_mode cmp_mode, rtx op0, rtx op1)
3753 rtx tem, trueop0, trueop1;
3755 if (cmp_mode == VOIDmode)
3756 cmp_mode = GET_MODE (op0);
3757 if (cmp_mode == VOIDmode)
3758 cmp_mode = GET_MODE (op1);
3760 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3761 if (tem)
3763 if (SCALAR_FLOAT_MODE_P (mode))
3765 if (tem == const0_rtx)
3766 return CONST0_RTX (mode);
3767 #ifdef FLOAT_STORE_FLAG_VALUE
3769 REAL_VALUE_TYPE val;
3770 val = FLOAT_STORE_FLAG_VALUE (mode);
3771 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3773 #else
3774 return NULL_RTX;
3775 #endif
3777 if (VECTOR_MODE_P (mode))
3779 if (tem == const0_rtx)
3780 return CONST0_RTX (mode);
3781 #ifdef VECTOR_STORE_FLAG_VALUE
3783 int i, units;
3784 rtvec v;
3786 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3787 if (val == NULL_RTX)
3788 return NULL_RTX;
3789 if (val == const1_rtx)
3790 return CONST1_RTX (mode);
3792 units = GET_MODE_NUNITS (mode);
3793 v = rtvec_alloc (units);
3794 for (i = 0; i < units; i++)
3795 RTVEC_ELT (v, i) = val;
3796 return gen_rtx_raw_CONST_VECTOR (mode, v);
3798 #else
3799 return NULL_RTX;
3800 #endif
3803 return tem;
3806 /* For the following tests, ensure const0_rtx is op1. */
3807 if (swap_commutative_operands_p (op0, op1)
3808 || (op0 == const0_rtx && op1 != const0_rtx))
3809 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3811 /* If op0 is a compare, extract the comparison arguments from it. */
3812 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3813 return simplify_relational_operation (code, mode, VOIDmode,
3814 XEXP (op0, 0), XEXP (op0, 1));
3816 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3817 || CC0_P (op0))
3818 return NULL_RTX;
3820 trueop0 = avoid_constant_pool_reference (op0);
3821 trueop1 = avoid_constant_pool_reference (op1);
3822 return simplify_relational_operation_1 (code, mode, cmp_mode,
3823 trueop0, trueop1);
3826 /* This part of simplify_relational_operation is only used when CMP_MODE
3827 is not in class MODE_CC (i.e. it is a real comparison).
3829 MODE is the mode of the result, while CMP_MODE specifies the mode in
3830 which the comparison is done, so it is the mode of the operands. */
3832 static rtx
3833 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3834 enum machine_mode cmp_mode, rtx op0, rtx op1)
3836 enum rtx_code op0code = GET_CODE (op0);
3838 if (op1 == const0_rtx && COMPARISON_P (op0))
3840 /* If op0 is a comparison, extract the comparison arguments
3841 from it. */
3842 if (code == NE)
3844 if (GET_MODE (op0) == mode)
3845 return simplify_rtx (op0);
3846 else
3847 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3848 XEXP (op0, 0), XEXP (op0, 1));
3850 else if (code == EQ)
3852 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3853 if (new_code != UNKNOWN)
3854 return simplify_gen_relational (new_code, mode, VOIDmode,
3855 XEXP (op0, 0), XEXP (op0, 1));
3859 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3860 if ((code == LTU || code == GEU)
3861 && GET_CODE (op0) == PLUS
3862 && rtx_equal_p (op1, XEXP (op0, 1))
3863 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3864 && !rtx_equal_p (op1, XEXP (op0, 0)))
3865 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3867 if (op1 == const0_rtx)
3869 /* Canonicalize (GTU x 0) as (NE x 0). */
3870 if (code == GTU)
3871 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3872 /* Canonicalize (LEU x 0) as (EQ x 0). */
3873 if (code == LEU)
3874 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3876 else if (op1 == const1_rtx)
3878 switch (code)
3880 case GE:
3881 /* Canonicalize (GE x 1) as (GT x 0). */
3882 return simplify_gen_relational (GT, mode, cmp_mode,
3883 op0, const0_rtx);
3884 case GEU:
3885 /* Canonicalize (GEU x 1) as (NE x 0). */
3886 return simplify_gen_relational (NE, mode, cmp_mode,
3887 op0, const0_rtx);
3888 case LT:
3889 /* Canonicalize (LT x 1) as (LE x 0). */
3890 return simplify_gen_relational (LE, mode, cmp_mode,
3891 op0, const0_rtx);
3892 case LTU:
3893 /* Canonicalize (LTU x 1) as (EQ x 0). */
3894 return simplify_gen_relational (EQ, mode, cmp_mode,
3895 op0, const0_rtx);
3896 default:
3897 break;
3900 else if (op1 == constm1_rtx)
3902 /* Canonicalize (LE x -1) as (LT x 0). */
3903 if (code == LE)
3904 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3905 /* Canonicalize (GT x -1) as (GE x 0). */
3906 if (code == GT)
3907 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3910 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
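  /* E.g. (eq (plus x 3) 7) becomes (eq x 4), the constant being folded
     as 7 - 3.  */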
3911 if ((code == EQ || code == NE)
3912 && (op0code == PLUS || op0code == MINUS)
3913 && CONSTANT_P (op1)
3914 && CONSTANT_P (XEXP (op0, 1))
3915 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3917 rtx x = XEXP (op0, 0);
3918 rtx c = XEXP (op0, 1);
3920 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3921 cmp_mode, op1, c);
3922 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3925 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3926 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3927 if (code == NE
3928 && op1 == const0_rtx
3929 && GET_MODE_CLASS (mode) == MODE_INT
3930 && cmp_mode != VOIDmode
3931 /* ??? Work-around BImode bugs in the ia64 backend. */
3932 && mode != BImode
3933 && cmp_mode != BImode
3934 && nonzero_bits (op0, cmp_mode) == 1
3935 && STORE_FLAG_VALUE == 1)
3936 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3937 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3938 : lowpart_subreg (mode, op0, cmp_mode);
3940 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3941 if ((code == EQ || code == NE)
3942 && op1 == const0_rtx
3943 && op0code == XOR)
3944 return simplify_gen_relational (code, mode, cmp_mode,
3945 XEXP (op0, 0), XEXP (op0, 1));
3947 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3948 if ((code == EQ || code == NE)
3949 && op0code == XOR
3950 && rtx_equal_p (XEXP (op0, 0), op1)
3951 && !side_effects_p (XEXP (op0, 0)))
3952 return simplify_gen_relational (code, mode, cmp_mode,
3953 XEXP (op0, 1), const0_rtx);
3955 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3956 if ((code == EQ || code == NE)
3957 && op0code == XOR
3958 && rtx_equal_p (XEXP (op0, 1), op1)
3959 && !side_effects_p (XEXP (op0, 1)))
3960 return simplify_gen_relational (code, mode, cmp_mode,
3961 XEXP (op0, 0), const0_rtx);
3963 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3964 if ((code == EQ || code == NE)
3965 && op0code == XOR
3966 && (GET_CODE (op1) == CONST_INT
3967 || GET_CODE (op1) == CONST_DOUBLE)
3968 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3969 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3970 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3971 simplify_gen_binary (XOR, cmp_mode,
3972 XEXP (op0, 1), op1));
3974 if (op0code == POPCOUNT && op1 == const0_rtx)
3975 switch (code)
3977 case EQ:
3978 case LE:
3979 case LEU:
3980 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3981 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3982 XEXP (op0, 0), const0_rtx);
3984 case NE:
3985 case GT:
3986 case GTU:
3987 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3988 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3989 XEXP (op0, 0), const0_rtx);
3991 default:
3992 break;
3995 return NULL_RTX;
3998 enum
4000 CMP_EQ = 1,
4001 CMP_LT = 2,
4002 CMP_GT = 4,
4003 CMP_LTU = 8,
4004 CMP_GTU = 16
4008 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4009 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4010 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4011 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4012 For floating-point comparisons, assume that the operands were ordered. */
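/* For example, operands known to compare as signed-less-than but
   unsigned-greater-than are encoded as (CMP_LT | CMP_GTU); with that,
   comparison_result (LT, ...) yields const_true_rtx while
   comparison_result (LTU, ...) yields const0_rtx. */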
4014 static rtx
4015 comparison_result (enum rtx_code code, int known_results)
4017 switch (code)
4019 case EQ:
4020 case UNEQ:
4021 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4022 case NE:
4023 case LTGT:
4024 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4026 case LT:
4027 case UNLT:
4028 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4029 case GE:
4030 case UNGE:
4031 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4033 case GT:
4034 case UNGT:
4035 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4036 case LE:
4037 case UNLE:
4038 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4040 case LTU:
4041 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4042 case GEU:
4043 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4045 case GTU:
4046 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4047 case LEU:
4048 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4050 case ORDERED:
4051 return const_true_rtx;
4052 case UNORDERED:
4053 return const0_rtx;
4054 default:
4055 gcc_unreachable ();
4059 /* Check if the given comparison (done in the given MODE) is actually a
4060 tautology or a contradiction.
4061 If no simplification is possible, this function returns zero.
4062 Otherwise, it returns either const_true_rtx or const0_rtx. */
4065 simplify_const_relational_operation (enum rtx_code code,
4066 enum machine_mode mode,
4067 rtx op0, rtx op1)
4069 rtx tem;
4070 rtx trueop0;
4071 rtx trueop1;
4073 gcc_assert (mode != VOIDmode
4074 || (GET_MODE (op0) == VOIDmode
4075 && GET_MODE (op1) == VOIDmode));
4077 /* If op0 is a compare, extract the comparison arguments from it. */
4078 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4080 op1 = XEXP (op0, 1);
4081 op0 = XEXP (op0, 0);
4083 if (GET_MODE (op0) != VOIDmode)
4084 mode = GET_MODE (op0);
4085 else if (GET_MODE (op1) != VOIDmode)
4086 mode = GET_MODE (op1);
4087 else
4088 return 0;
4091 /* We can't simplify MODE_CC values since we don't know what the
4092 actual comparison is. */
4093 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4094 return 0;
4096 /* Make sure the constant is second. */
4097 if (swap_commutative_operands_p (op0, op1))
4099 tem = op0, op0 = op1, op1 = tem;
4100 code = swap_condition (code);
4103 trueop0 = avoid_constant_pool_reference (op0);
4104 trueop1 = avoid_constant_pool_reference (op1);
4106 /* For integer comparisons of A and B we may be able to simplify A - B
4107 and then simplify a comparison of the result with zero. If A and B are
4108 both either a register or a CONST_INT, this can't help; testing for these
4109 cases will prevent infinite recursion here and speed things up.
4111 We can only do this for EQ and NE comparisons, as otherwise we may
4112 lose or introduce overflow, which we cannot disregard as undefined since
4113 we do not know the signedness of the operation on either the left or
4114 the right hand side of the comparison. */
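/* For instance, for (eq (plus x (const_int 1)) x) the MINUS folds to
   (const_int 1), and the recursive comparison against zero then folds
   the whole expression to const0_rtx. */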
4116 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4117 && (code == EQ || code == NE)
4118 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
4119 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
4120 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4121 /* We cannot do this if tem is a nonzero address. */
4122 && ! nonzero_address_p (tem))
4123 return simplify_const_relational_operation (signed_condition (code),
4124 mode, tem, const0_rtx);
4126 if (! HONOR_NANS (mode) && code == ORDERED)
4127 return const_true_rtx;
4129 if (! HONOR_NANS (mode) && code == UNORDERED)
4130 return const0_rtx;
4132 /* For modes without NaNs, if the two operands are equal, we know the
4133 result except if they have side-effects. Even with NaNs we know
4134 the result of unordered comparisons and, if signaling NaNs are
4135 irrelevant, also the result of LT/GT/LTGT. */
4136 if ((! HONOR_NANS (GET_MODE (trueop0))
4137 || code == UNEQ || code == UNLE || code == UNGE
4138 || ((code == LT || code == GT || code == LTGT)
4139 && ! HONOR_SNANS (GET_MODE (trueop0))))
4140 && rtx_equal_p (trueop0, trueop1)
4141 && ! side_effects_p (trueop0))
4142 return comparison_result (code, CMP_EQ);
4144 /* If the operands are floating-point constants, see if we can fold
4145 the result. */
4146 if (GET_CODE (trueop0) == CONST_DOUBLE
4147 && GET_CODE (trueop1) == CONST_DOUBLE
4148 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4150 REAL_VALUE_TYPE d0, d1;
4152 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4153 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4155 /* Comparisons are unordered iff at least one of the values is NaN. */
4156 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4157 switch (code)
4159 case UNEQ:
4160 case UNLT:
4161 case UNGT:
4162 case UNLE:
4163 case UNGE:
4164 case NE:
4165 case UNORDERED:
4166 return const_true_rtx;
4167 case EQ:
4168 case LT:
4169 case GT:
4170 case LE:
4171 case GE:
4172 case LTGT:
4173 case ORDERED:
4174 return const0_rtx;
4175 default:
4176 return 0;
4179 return comparison_result (code,
4180 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4181 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4184 /* Otherwise, see if the operands are both integers. */
4185 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4186 && (GET_CODE (trueop0) == CONST_DOUBLE
4187 || GET_CODE (trueop0) == CONST_INT)
4188 && (GET_CODE (trueop1) == CONST_DOUBLE
4189 || GET_CODE (trueop1) == CONST_INT))
4191 int width = GET_MODE_BITSIZE (mode);
4192 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4193 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4195 /* Get the two words comprising each integer constant. */
4196 if (GET_CODE (trueop0) == CONST_DOUBLE)
4198 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4199 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4201 else
4203 l0u = l0s = INTVAL (trueop0);
4204 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4207 if (GET_CODE (trueop1) == CONST_DOUBLE)
4209 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4210 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4212 else
4214 l1u = l1s = INTVAL (trueop1);
4215 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4218 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4219 we have to sign or zero-extend the values. */
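/* E.g. in an 8-bit mode the value 0xff must read as 255 in the unsigned
   copies (l0u/l1u) but as -1 in the signed copies (l0s/l1s). */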
4220 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4222 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4223 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4225 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4226 l0s |= ((HOST_WIDE_INT) (-1) << width);
4228 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4229 l1s |= ((HOST_WIDE_INT) (-1) << width);
4231 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4232 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4234 if (h0u == h1u && l0u == l1u)
4235 return comparison_result (code, CMP_EQ);
4236 else
4238 int cr;
4239 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4240 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4241 return comparison_result (code, cr);
4245 /* Optimize comparisons with upper and lower bounds. */
4246 if (SCALAR_INT_MODE_P (mode)
4247 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4248 && GET_CODE (trueop1) == CONST_INT)
4250 int sign;
4251 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4252 HOST_WIDE_INT val = INTVAL (trueop1);
4253 HOST_WIDE_INT mmin, mmax;
4255 if (code == GEU
4256 || code == LEU
4257 || code == GTU
4258 || code == LTU)
4259 sign = 0;
4260 else
4261 sign = 1;
4263 /* Get a reduced range if the sign bit is zero. */
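/* nonzero_bits gives a mask of the bits that may be set; if the sign bit
   cannot be set, the value is known to lie in [0, nonzero]. */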
4264 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4266 mmin = 0;
4267 mmax = nonzero;
4269 else
4271 rtx mmin_rtx, mmax_rtx;
4272 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4274 mmin = INTVAL (mmin_rtx);
4275 mmax = INTVAL (mmax_rtx);
4276 if (sign)
4278 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4280 mmin >>= (sign_copies - 1);
4281 mmax >>= (sign_copies - 1);
4285 switch (code)
4287 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4288 case GEU:
4289 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4290 return const_true_rtx;
4291 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4292 return const0_rtx;
4293 break;
4294 case GE:
4295 if (val <= mmin)
4296 return const_true_rtx;
4297 if (val > mmax)
4298 return const0_rtx;
4299 break;
4301 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4302 case LEU:
4303 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4304 return const_true_rtx;
4305 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4306 return const0_rtx;
4307 break;
4308 case LE:
4309 if (val >= mmax)
4310 return const_true_rtx;
4311 if (val < mmin)
4312 return const0_rtx;
4313 break;
4315 case EQ:
4316 /* x == y is always false for y out of range. */
4317 if (val < mmin || val > mmax)
4318 return const0_rtx;
4319 break;
4321 /* x > y is always false for y >= mmax, always true for y < mmin. */
4322 case GTU:
4323 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4324 return const0_rtx;
4325 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4326 return const_true_rtx;
4327 break;
4328 case GT:
4329 if (val >= mmax)
4330 return const0_rtx;
4331 if (val < mmin)
4332 return const_true_rtx;
4333 break;
4335 /* x < y is always false for y <= mmin, always true for y > mmax. */
4336 case LTU:
4337 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4338 return const0_rtx;
4339 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4340 return const_true_rtx;
4341 break;
4342 case LT:
4343 if (val <= mmin)
4344 return const0_rtx;
4345 if (val > mmax)
4346 return const_true_rtx;
4347 break;
4349 case NE:
4350 /* x != y is always true for y out of range. */
4351 if (val < mmin || val > mmax)
4352 return const_true_rtx;
4353 break;
4355 default:
4356 break;
4360 /* Optimize integer comparisons with zero. */
4361 if (trueop1 == const0_rtx)
4363 /* Some addresses are known to be nonzero. We don't know
4364 their sign, but equality comparisons are known. */
4365 if (nonzero_address_p (trueop0))
4367 if (code == EQ || code == LEU)
4368 return const0_rtx;
4369 if (code == NE || code == GTU)
4370 return const_true_rtx;
4373 /* See if the first operand is an IOR with a constant. If so, we
4374 may be able to determine the result of this comparison. */
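/* A nonzero constant in the IOR guarantees that op0 itself is nonzero,
   and if that constant also has the sign bit set, op0 is known to be
   negative; either fact can decide the comparison against zero. */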
4375 if (GET_CODE (op0) == IOR)
4377 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4378 if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx)
4380 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4381 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4382 && (INTVAL (inner_const)
4383 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4385 switch (code)
4387 case EQ:
4388 case LEU:
4389 return const0_rtx;
4390 case NE:
4391 case GTU:
4392 return const_true_rtx;
4393 case LT:
4394 case LE:
4395 if (has_sign)
4396 return const_true_rtx;
4397 break;
4398 case GT:
4399 case GE:
4400 if (has_sign)
4401 return const0_rtx;
4402 break;
4403 default:
4404 break;
4410 /* Optimize comparison of ABS with zero. */
4411 if (trueop1 == CONST0_RTX (mode)
4412 && (GET_CODE (trueop0) == ABS
4413 || (GET_CODE (trueop0) == FLOAT_EXTEND
4414 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4416 switch (code)
4418 case LT:
4419 /* Optimize abs(x) < 0.0. */
4420 if (!HONOR_SNANS (mode)
4421 && (!INTEGRAL_MODE_P (mode)
4422 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4424 if (INTEGRAL_MODE_P (mode)
4425 && (issue_strict_overflow_warning
4426 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4427 warning (OPT_Wstrict_overflow,
4428 ("assuming signed overflow does not occur when "
4429 "assuming abs (x) < 0 is false"));
4430 return const0_rtx;
4432 break;
4434 case GE:
4435 /* Optimize abs(x) >= 0.0. */
4436 if (!HONOR_NANS (mode)
4437 && (!INTEGRAL_MODE_P (mode)
4438 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4440 if (INTEGRAL_MODE_P (mode)
4441 && (issue_strict_overflow_warning
4442 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4443 warning (OPT_Wstrict_overflow,
4444 ("assuming signed overflow does not occur when "
4445 "assuming abs (x) >= 0 is true"));
4446 return const_true_rtx;
4448 break;
4450 case UNGE:
4451 /* Optimize ! (abs(x) < 0.0). */
4452 return const_true_rtx;
4454 default:
4455 break;
4459 return 0;
4462 /* Simplify CODE, an operation with result mode MODE and three operands,
4463 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4464 a constant. Return 0 if no simplification is possible. */
4467 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4468 enum machine_mode op0_mode, rtx op0, rtx op1,
4469 rtx op2)
4471 unsigned int width = GET_MODE_BITSIZE (mode);
4473 /* VOIDmode means "infinite" precision. */
4474 if (width == 0)
4475 width = HOST_BITS_PER_WIDE_INT;
4477 switch (code)
4479 case SIGN_EXTRACT:
4480 case ZERO_EXTRACT:
4481 if (GET_CODE (op0) == CONST_INT
4482 && GET_CODE (op1) == CONST_INT
4483 && GET_CODE (op2) == CONST_INT
4484 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4485 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4487 /* Extracting a bit-field from a constant. */
4488 HOST_WIDE_INT val = INTVAL (op0);
4490 if (BITS_BIG_ENDIAN)
4491 val >>= (GET_MODE_BITSIZE (op0_mode)
4492 - INTVAL (op2) - INTVAL (op1));
4493 else
4494 val >>= INTVAL (op2);
4496 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4498 /* First zero-extend. */
4499 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4500 /* If desired, propagate sign bit. */
4501 if (code == SIGN_EXTRACT
4502 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4503 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4506 /* Clear the bits that don't belong in our mode,
4507 unless they and our sign bit are all one.
4508 So we get either a reasonable negative value or a reasonable
4509 unsigned value for this mode. */
4510 if (width < HOST_BITS_PER_WIDE_INT
4511 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4512 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4513 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4515 return gen_int_mode (val, mode);
4517 break;
4519 case IF_THEN_ELSE:
4520 if (GET_CODE (op0) == CONST_INT)
4521 return op0 != const0_rtx ? op1 : op2;
4523 /* Convert c ? a : a into "a". */
4524 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4525 return op1;
4527 /* Convert a != b ? a : b into "a". */
4528 if (GET_CODE (op0) == NE
4529 && ! side_effects_p (op0)
4530 && ! HONOR_NANS (mode)
4531 && ! HONOR_SIGNED_ZEROS (mode)
4532 && ((rtx_equal_p (XEXP (op0, 0), op1)
4533 && rtx_equal_p (XEXP (op0, 1), op2))
4534 || (rtx_equal_p (XEXP (op0, 0), op2)
4535 && rtx_equal_p (XEXP (op0, 1), op1))))
4536 return op1;
4538 /* Convert a == b ? a : b into "b". */
4539 if (GET_CODE (op0) == EQ
4540 && ! side_effects_p (op0)
4541 && ! HONOR_NANS (mode)
4542 && ! HONOR_SIGNED_ZEROS (mode)
4543 && ((rtx_equal_p (XEXP (op0, 0), op1)
4544 && rtx_equal_p (XEXP (op0, 1), op2))
4545 || (rtx_equal_p (XEXP (op0, 0), op2)
4546 && rtx_equal_p (XEXP (op0, 1), op1))))
4547 return op2;
4549 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4551 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4552 ? GET_MODE (XEXP (op0, 1))
4553 : GET_MODE (XEXP (op0, 0)));
4554 rtx temp;
4556 /* Look for constants in op1 and op2 (STORE_FLAG_VALUE and zero) that let us use the comparison directly. */
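/* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt a b) (const_int 1)
   (const_int 0)) collapses to the comparison (lt a b) itself. */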
4557 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4559 HOST_WIDE_INT t = INTVAL (op1);
4560 HOST_WIDE_INT f = INTVAL (op2);
4562 if (t == STORE_FLAG_VALUE && f == 0)
4563 code = GET_CODE (op0);
4564 else if (t == 0 && f == STORE_FLAG_VALUE)
4566 enum rtx_code tmp;
4567 tmp = reversed_comparison_code (op0, NULL_RTX);
4568 if (tmp == UNKNOWN)
4569 break;
4570 code = tmp;
4572 else
4573 break;
4575 return simplify_gen_relational (code, mode, cmp_mode,
4576 XEXP (op0, 0), XEXP (op0, 1));
4579 if (cmp_mode == VOIDmode)
4580 cmp_mode = op0_mode;
4581 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4582 cmp_mode, XEXP (op0, 0),
4583 XEXP (op0, 1));
4585 /* See if any simplifications were possible. */
4586 if (temp)
4588 if (GET_CODE (temp) == CONST_INT)
4589 return temp == const0_rtx ? op2 : op1;
4590 else if (temp)
4591 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4594 break;
4596 case VEC_MERGE:
4597 gcc_assert (GET_MODE (op0) == mode);
4598 gcc_assert (GET_MODE (op1) == mode);
4599 gcc_assert (VECTOR_MODE_P (mode));
4600 op2 = avoid_constant_pool_reference (op2);
4601 if (GET_CODE (op2) == CONST_INT)
4603 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4605 int mask = (1 << n_elts) - 1;
4607 if (!(INTVAL (op2) & mask))
4608 return op1;
4609 if ((INTVAL (op2) & mask) == mask)
4610 return op0;
4612 op0 = avoid_constant_pool_reference (op0);
4613 op1 = avoid_constant_pool_reference (op1);
4614 if (GET_CODE (op0) == CONST_VECTOR
4615 && GET_CODE (op1) == CONST_VECTOR)
4617 rtvec v = rtvec_alloc (n_elts);
4618 unsigned int i;
4620 for (i = 0; i < n_elts; i++)
4621 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4622 ? CONST_VECTOR_ELT (op0, i)
4623 : CONST_VECTOR_ELT (op1, i));
4624 return gen_rtx_CONST_VECTOR (mode, v);
4627 break;
4629 default:
4630 gcc_unreachable ();
4633 return 0;
4636 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4637 or CONST_VECTOR,
4638 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4640 Works by unpacking OP into a collection of 8-bit values
4641 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4642 and then repacking them again for OUTERMODE. */
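/* For example, on a little-endian target (subreg:QI (const_int 0x1234) 0)
   with HImode as INNERMODE unpacks to the bytes {0x34, 0x12}, selects
   byte 0, and repacks it as (const_int 0x34). */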
4644 static rtx
4645 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4646 enum machine_mode innermode, unsigned int byte)
4648 /* We support up to 512-bit values (for V8DFmode). */
4649 enum {
4650 max_bitsize = 512,
4651 value_bit = 8,
4652 value_mask = (1 << value_bit) - 1
4654 unsigned char value[max_bitsize / value_bit];
4655 int value_start;
4656 int i;
4657 int elem;
4659 int num_elem;
4660 rtx * elems;
4661 int elem_bitsize;
4662 rtx result_s;
4663 rtvec result_v = NULL;
4664 enum mode_class outer_class;
4665 enum machine_mode outer_submode;
4667 /* Some ports misuse CCmode. */
4668 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4669 return op;
4671 /* We have no way to represent a complex constant at the rtl level. */
4672 if (COMPLEX_MODE_P (outermode))
4673 return NULL_RTX;
4675 /* Unpack the value. */
4677 if (GET_CODE (op) == CONST_VECTOR)
4679 num_elem = CONST_VECTOR_NUNITS (op);
4680 elems = &CONST_VECTOR_ELT (op, 0);
4681 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4683 else
4685 num_elem = 1;
4686 elems = &op;
4687 elem_bitsize = max_bitsize;
4689 /* If this asserts, it is too complicated; reducing value_bit may help. */
4690 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4691 /* I don't know how to handle endianness of sub-units. */
4692 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4694 for (elem = 0; elem < num_elem; elem++)
4696 unsigned char * vp;
4697 rtx el = elems[elem];
4699 /* Vectors are kept in target memory order. (This is probably
4700 a mistake.) */
4702 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4703 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4704 / BITS_PER_UNIT);
4705 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4706 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4707 unsigned bytele = (subword_byte % UNITS_PER_WORD
4708 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4709 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4712 switch (GET_CODE (el))
4714 case CONST_INT:
4715 for (i = 0;
4716 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4717 i += value_bit)
4718 *vp++ = INTVAL (el) >> i;
4719 /* CONST_INTs are always logically sign-extended. */
4720 for (; i < elem_bitsize; i += value_bit)
4721 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4722 break;
4724 case CONST_DOUBLE:
4725 if (GET_MODE (el) == VOIDmode)
4727 /* If this triggers, someone should have generated a
4728 CONST_INT instead. */
4729 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4731 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4732 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4733 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4735 *vp++
4736 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4737 i += value_bit;
4739 /* It shouldn't matter what's done here, so fill it with
4740 zero. */
4741 for (; i < elem_bitsize; i += value_bit)
4742 *vp++ = 0;
4744 else
4746 long tmp[max_bitsize / 32];
4747 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4749 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4750 gcc_assert (bitsize <= elem_bitsize);
4751 gcc_assert (bitsize % value_bit == 0);
4753 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4754 GET_MODE (el));
4756 /* real_to_target produces its result in words affected by
4757 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4758 and use WORDS_BIG_ENDIAN instead; see the documentation
4759 of SUBREG in rtl.texi. */
4760 for (i = 0; i < bitsize; i += value_bit)
4762 int ibase;
4763 if (WORDS_BIG_ENDIAN)
4764 ibase = bitsize - 1 - i;
4765 else
4766 ibase = i;
4767 *vp++ = tmp[ibase / 32] >> i % 32;
4770 /* It shouldn't matter what's done here, so fill it with
4771 zero. */
4772 for (; i < elem_bitsize; i += value_bit)
4773 *vp++ = 0;
4775 break;
4777 case CONST_FIXED:
4778 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4780 for (i = 0; i < elem_bitsize; i += value_bit)
4781 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4783 else
4785 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4786 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4787 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4788 i += value_bit)
4789 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4790 >> (i - HOST_BITS_PER_WIDE_INT);
4791 for (; i < elem_bitsize; i += value_bit)
4792 *vp++ = 0;
4794 break;
4796 default:
4797 gcc_unreachable ();
4801 /* Now, pick the right byte to start with. */
4802 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4803 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4804 will already have offset 0. */
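/* E.g. for (subreg:SI (x:DI) 4) on a big-endian target with 4-byte words,
   BYTE 4 names the least significant word in memory order, so it is
   renumbered to 0 here. */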
4805 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4807 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4808 - byte);
4809 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4810 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4811 byte = (subword_byte % UNITS_PER_WORD
4812 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4815 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4816 so if it's become negative it will instead be very large.) */
4817 gcc_assert (byte < GET_MODE_SIZE (innermode));
4819 /* Convert from bytes to chunks of size value_bit. */
4820 value_start = byte * (BITS_PER_UNIT / value_bit);
4822 /* Re-pack the value. */
4824 if (VECTOR_MODE_P (outermode))
4826 num_elem = GET_MODE_NUNITS (outermode);
4827 result_v = rtvec_alloc (num_elem);
4828 elems = &RTVEC_ELT (result_v, 0);
4829 outer_submode = GET_MODE_INNER (outermode);
4831 else
4833 num_elem = 1;
4834 elems = &result_s;
4835 outer_submode = outermode;
4838 outer_class = GET_MODE_CLASS (outer_submode);
4839 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4841 gcc_assert (elem_bitsize % value_bit == 0);
4842 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4844 for (elem = 0; elem < num_elem; elem++)
4846 unsigned char *vp;
4848 /* Vectors are stored in target memory order. (This is probably
4849 a mistake.) */
4851 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4852 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4853 / BITS_PER_UNIT);
4854 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4855 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4856 unsigned bytele = (subword_byte % UNITS_PER_WORD
4857 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4858 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4861 switch (outer_class)
4863 case MODE_INT:
4864 case MODE_PARTIAL_INT:
4866 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4868 for (i = 0;
4869 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4870 i += value_bit)
4871 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4872 for (; i < elem_bitsize; i += value_bit)
4873 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4874 << (i - HOST_BITS_PER_WIDE_INT));
4876 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4877 know why. */
4878 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4879 elems[elem] = gen_int_mode (lo, outer_submode);
4880 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4881 elems[elem] = immed_double_const (lo, hi, outer_submode);
4882 else
4883 return NULL_RTX;
4885 break;
4887 case MODE_FLOAT:
4888 case MODE_DECIMAL_FLOAT:
4890 REAL_VALUE_TYPE r;
4891 long tmp[max_bitsize / 32];
4893 /* real_from_target wants its input in words affected by
4894 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4895 and use WORDS_BIG_ENDIAN instead; see the documentation
4896 of SUBREG in rtl.texi. */
4897 for (i = 0; i < max_bitsize / 32; i++)
4898 tmp[i] = 0;
4899 for (i = 0; i < elem_bitsize; i += value_bit)
4901 int ibase;
4902 if (WORDS_BIG_ENDIAN)
4903 ibase = elem_bitsize - 1 - i;
4904 else
4905 ibase = i;
4906 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4909 real_from_target (&r, tmp, outer_submode);
4910 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4912 break;
4914 case MODE_FRACT:
4915 case MODE_UFRACT:
4916 case MODE_ACCUM:
4917 case MODE_UACCUM:
4919 FIXED_VALUE_TYPE f;
4920 f.data.low = 0;
4921 f.data.high = 0;
4922 f.mode = outer_submode;
4924 for (i = 0;
4925 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4926 i += value_bit)
4927 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4928 for (; i < elem_bitsize; i += value_bit)
4929 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4930 << (i - HOST_BITS_PER_WIDE_INT));
4932 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
4934 break;
4936 default:
4937 gcc_unreachable ();
4940 if (VECTOR_MODE_P (outermode))
4941 return gen_rtx_CONST_VECTOR (outermode, result_v);
4942 else
4943 return result_s;
4946 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4947 Return 0 if no simplifications are possible. */
4949 simplify_subreg (enum machine_mode outermode, rtx op,
4950 enum machine_mode innermode, unsigned int byte)
4952 /* Little bit of sanity checking. */
4953 gcc_assert (innermode != VOIDmode);
4954 gcc_assert (outermode != VOIDmode);
4955 gcc_assert (innermode != BLKmode);
4956 gcc_assert (outermode != BLKmode);
4958 gcc_assert (GET_MODE (op) == innermode
4959 || GET_MODE (op) == VOIDmode);
4961 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4962 gcc_assert (byte < GET_MODE_SIZE (innermode));
4964 if (outermode == innermode && !byte)
4965 return op;
4967 if (GET_CODE (op) == CONST_INT
4968 || GET_CODE (op) == CONST_DOUBLE
4969 || GET_CODE (op) == CONST_FIXED
4970 || GET_CODE (op) == CONST_VECTOR)
4971 return simplify_immed_subreg (outermode, op, innermode, byte);
4973 /* Changing mode twice with SUBREG => just change it once,
4974 or not at all if changing back to op's starting mode. */
4975 if (GET_CODE (op) == SUBREG)
4977 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4978 int final_offset = byte + SUBREG_BYTE (op);
4979 rtx newx;
4981 if (outermode == innermostmode
4982 && byte == 0 && SUBREG_BYTE (op) == 0)
4983 return SUBREG_REG (op);
4985 /* The SUBREG_BYTE represents the offset, as if the value were stored
4986 in memory. The irritating exception is a paradoxical subreg, where
4987 we define SUBREG_BYTE to be 0; on big endian machines, this
4988 value would otherwise be negative. For a moment, undo this exception. */
4989 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4991 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4992 if (WORDS_BIG_ENDIAN)
4993 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4994 if (BYTES_BIG_ENDIAN)
4995 final_offset += difference % UNITS_PER_WORD;
4997 if (SUBREG_BYTE (op) == 0
4998 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5000 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5001 if (WORDS_BIG_ENDIAN)
5002 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5003 if (BYTES_BIG_ENDIAN)
5004 final_offset += difference % UNITS_PER_WORD;
5007 /* See whether resulting subreg will be paradoxical. */
5008 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5010 /* In nonparadoxical subregs we can't handle negative offsets. */
5011 if (final_offset < 0)
5012 return NULL_RTX;
5013 /* Bail out in case resulting subreg would be incorrect. */
5014 if (final_offset % GET_MODE_SIZE (outermode)
5015 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5016 return NULL_RTX;
5018 else
5020 int offset = 0;
5021 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5023 /* In a paradoxical subreg, see if we are still looking at the lower part.
5024 If so, our SUBREG_BYTE will be 0. */
5025 if (WORDS_BIG_ENDIAN)
5026 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5027 if (BYTES_BIG_ENDIAN)
5028 offset += difference % UNITS_PER_WORD;
5029 if (offset == final_offset)
5030 final_offset = 0;
5031 else
5032 return NULL_RTX;
5035 /* Recurse for further possible simplifications. */
5036 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5037 final_offset);
5038 if (newx)
5039 return newx;
5040 if (validate_subreg (outermode, innermostmode,
5041 SUBREG_REG (op), final_offset))
5043 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5044 if (SUBREG_PROMOTED_VAR_P (op)
5045 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5046 && GET_MODE_CLASS (outermode) == MODE_INT
5047 && IN_RANGE (GET_MODE_SIZE (outermode),
5048 GET_MODE_SIZE (innermode),
5049 GET_MODE_SIZE (innermostmode))
5050 && subreg_lowpart_p (newx))
5052 SUBREG_PROMOTED_VAR_P (newx) = 1;
5053 SUBREG_PROMOTED_UNSIGNED_SET
5054 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5056 return newx;
5058 return NULL_RTX;
5061 /* Merge implicit and explicit truncations. */
5063 if (GET_CODE (op) == TRUNCATE
5064 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5065 && subreg_lowpart_offset (outermode, innermode) == byte)
5066 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5067 GET_MODE (XEXP (op, 0)));
5069 /* SUBREG of a hard register => just change the register number
5070 and/or mode. If the hard register is not valid in that mode,
5071 suppress this simplification. If the hard register is the stack,
5072 frame, or argument pointer, leave this as a SUBREG. */
5074 if (REG_P (op) && HARD_REGISTER_P (op))
5076 unsigned int regno, final_regno;
5078 regno = REGNO (op);
5079 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5080 if (HARD_REGISTER_NUM_P (final_regno))
5082 rtx x;
5083 int final_offset = byte;
5085 /* Adjust offset for paradoxical subregs. */
5086 if (byte == 0
5087 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5089 int difference = (GET_MODE_SIZE (innermode)
5090 - GET_MODE_SIZE (outermode));
5091 if (WORDS_BIG_ENDIAN)
5092 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5093 if (BYTES_BIG_ENDIAN)
5094 final_offset += difference % UNITS_PER_WORD;
5097 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5099 /* Propagate the original regno. We don't have any way to specify
5100 the offset inside the original regno, so do so only for the lowpart.
5101 The information is used only by alias analysis, which cannot
5102 grok partial registers anyway. */
5104 if (subreg_lowpart_offset (outermode, innermode) == byte)
5105 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5106 return x;
5110 /* If we have a SUBREG of a register that we are replacing and we are
5111 replacing it with a MEM, make a new MEM and try replacing the
5112 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5113 or if we would be widening it. */
5115 if (MEM_P (op)
5116 && ! mode_dependent_address_p (XEXP (op, 0))
5117 /* Allow splitting of volatile memory references in case we don't
5118 have an instruction to move the whole thing. */
5119 && (! MEM_VOLATILE_P (op)
5120 || ! have_insn_for (SET, innermode))
5121 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5122 return adjust_address_nv (op, outermode, byte);
5124 /* Handle complex values represented as CONCAT
5125 of real and imaginary part. */
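/* For example, (subreg:SF (concat:SC r i) 4), with SFmode four bytes
   wide, selects the imaginary part and simplifies to i. */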
5126 if (GET_CODE (op) == CONCAT)
5128 unsigned int part_size, final_offset;
5129 rtx part, res;
5131 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5132 if (byte < part_size)
5134 part = XEXP (op, 0);
5135 final_offset = byte;
5137 else
5139 part = XEXP (op, 1);
5140 final_offset = byte - part_size;
5143 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5144 return NULL_RTX;
5146 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5147 if (res)
5148 return res;
5149 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5150 return gen_rtx_SUBREG (outermode, part, final_offset);
5151 return NULL_RTX;
5154 /* Optimize SUBREG truncations of zero- and sign-extended values. */
5155 if ((GET_CODE (op) == ZERO_EXTEND
5156 || GET_CODE (op) == SIGN_EXTEND)
5157 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5159 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5161 /* If we're requesting the lowpart of a zero or sign extension,
5162 there are three possibilities. If the outermode is the same
5163 as the origmode, we can omit both the extension and the subreg.
5164 If the outermode is not larger than the origmode, we can apply
5165 the truncation without the extension. Finally, if the outermode
5166 is larger than the origmode, but both are integer modes, we
5167 can just extend to the appropriate mode. */
5168 if (bitpos == 0)
5170 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5171 if (outermode == origmode)
5172 return XEXP (op, 0);
5173 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5174 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5175 subreg_lowpart_offset (outermode,
5176 origmode));
5177 if (SCALAR_INT_MODE_P (outermode))
5178 return simplify_gen_unary (GET_CODE (op), outermode,
5179 XEXP (op, 0), origmode);
5182 /* A SUBREG resulting from a zero extension may fold to zero if
5183 it extracts bits higher than the ZERO_EXTEND's source provides. */
5184 if (GET_CODE (op) == ZERO_EXTEND
5185 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5186 return CONST0_RTX (outermode);
5189 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5190 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5191 the outer subreg is effectively a truncation to the original mode. */
5192 if ((GET_CODE (op) == LSHIFTRT
5193 || GET_CODE (op) == ASHIFTRT)
5194 && SCALAR_INT_MODE_P (outermode)
5195 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5196 to avoid the possibility that an outer LSHIFTRT shifts by more
5197 than the sign extension's sign_bit_copies and introduces zeros
5198 into the high bits of the result. */
5199 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5200 && GET_CODE (XEXP (op, 1)) == CONST_INT
5201 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5202 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5203 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5204 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5205 return simplify_gen_binary (ASHIFTRT, outermode,
5206 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5208 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5209 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5210 the outer subreg is effectively a truncation to the original mode. */
5211 if ((GET_CODE (op) == LSHIFTRT
5212 || GET_CODE (op) == ASHIFTRT)
5213 && SCALAR_INT_MODE_P (outermode)
5214 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5215 && GET_CODE (XEXP (op, 1)) == CONST_INT
5216 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5217 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5218 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5219 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5220 return simplify_gen_binary (LSHIFTRT, outermode,
5221 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5223 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5224 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5225 the outer subreg is effectively a truncation to the original mode. */
5226 if (GET_CODE (op) == ASHIFT
5227 && SCALAR_INT_MODE_P (outermode)
5228 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5229 && GET_CODE (XEXP (op, 1)) == CONST_INT
5230 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5231 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5232 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5233 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5234 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5235 return simplify_gen_binary (ASHIFT, outermode,
5236 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5238 /* Recognize a word extraction from a multi-word subreg. */
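/* E.g. on a little-endian target with 32-bit words,
   (subreg:SI (lshiftrt:DI (x:DI) (const_int 32)) 0) is the upper word
   of X and becomes (subreg:SI (x:DI) 4). */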
5239 if ((GET_CODE (op) == LSHIFTRT
5240 || GET_CODE (op) == ASHIFTRT)
5241 && SCALAR_INT_MODE_P (outermode)
5242 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5243 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5244 && GET_CODE (XEXP (op, 1)) == CONST_INT
5245 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5246 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5247 && byte == subreg_lowpart_offset (outermode, innermode))
5249 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5250 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5251 (WORDS_BIG_ENDIAN
5252 ? byte - shifted_bytes : byte + shifted_bytes));
5255 return NULL_RTX;
5258 /* Make a SUBREG operation or equivalent if it folds. */
5261 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5262 enum machine_mode innermode, unsigned int byte)
5264 rtx newx;
5266 newx = simplify_subreg (outermode, op, innermode, byte);
5267 if (newx)
5268 return newx;
5270 if (GET_CODE (op) == SUBREG
5271 || GET_CODE (op) == CONCAT
5272 || GET_MODE (op) == VOIDmode)
5273 return NULL_RTX;
5275 if (validate_subreg (outermode, innermode, op, byte))
5276 return gen_rtx_SUBREG (outermode, op, byte);
5278 return NULL_RTX;
5281 /* Simplify X, an rtx expression.
5283 Return the simplified expression or NULL if no simplifications
5284 were possible.
5286 This is the preferred entry point into the simplification routines;
5287 however, we still allow passes to call the more specific routines.
5289 Right now GCC has three (yes, three) major bodies of RTL simplification
5290 code that need to be unified.
5292 1. fold_rtx in cse.c. This code uses various CSE specific
5293 information to aid in RTL simplification.
5295 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5296 it uses combine specific information to aid in RTL
5297 simplification.
5299 3. The routines in this file.
5302 Long term we want to only have one body of simplification code; to
5303 get to that state I recommend the following steps:
5305 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5306 which do not depend on pass-specific state into these routines.
5308 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5309 use this routine whenever possible.
5311 3. Allow for pass dependent state to be provided to these
5312 routines and add simplifications based on the pass dependent
5313 state. Remove code from cse.c & combine.c that becomes
5314 redundant/dead.
5316 It will take time, but ultimately the compiler will be easier to
5317 maintain and improve. It's totally silly that when we add a
5318 simplification it needs to be added to 4 places (3 for RTL
5319 simplification and 1 for tree simplification). */
5322 simplify_rtx (const_rtx x)
5324 const enum rtx_code code = GET_CODE (x);
5325 const enum machine_mode mode = GET_MODE (x);
5327 switch (GET_RTX_CLASS (code))
5329 case RTX_UNARY:
5330 return simplify_unary_operation (code, mode,
5331 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5332 case RTX_COMM_ARITH:
5333 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5334 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5336 /* Fall through.... */
5338 case RTX_BIN_ARITH:
5339 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5341 case RTX_TERNARY:
5342 case RTX_BITFIELD_OPS:
5343 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5344 XEXP (x, 0), XEXP (x, 1),
5345 XEXP (x, 2));
5347 case RTX_COMPARE:
5348 case RTX_COMM_COMPARE:
5349 return simplify_relational_operation (code, mode,
5350 ((GET_MODE (XEXP (x, 0))
5351 != VOIDmode)
5352 ? GET_MODE (XEXP (x, 0))
5353 : GET_MODE (XEXP (x, 1))),
5354 XEXP (x, 0),
5355 XEXP (x, 1));
5357 case RTX_EXTRA:
5358 if (code == SUBREG)
5359 return simplify_subreg (mode, SUBREG_REG (x),
5360 GET_MODE (SUBREG_REG (x)),
5361 SUBREG_BYTE (x));
5362 break;
5364 case RTX_OBJ:
5365 if (code == LO_SUM)
5367 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5368 if (GET_CODE (XEXP (x, 0)) == HIGH
5369 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5370 return XEXP (x, 1);
5372 break;
5374 default:
5375 break;
5377 return NULL;