gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
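/* For example, on a host with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (7)
   is 0 and HWI_SIGN_EXTEND (-5) is -1; the high word is simply a copy of
   the low word's sign bit.  */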
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
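/* For example, in SImode (32 bits wide) only a constant whose low 32 bits
   are 0x80000000 satisfies this predicate.  */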
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
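/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx) simply
   returns X (see the PLUS case in simplify_binary_operation_1 below),
   while simplify_gen_binary (PLUS, SImode, const1_rtx, x) with X a
   register is canonicalized to (plus:SI x (const_int 1)).  */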
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
208 rtx
209 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
210 enum machine_mode op_mode)
212 rtx tem;
214 /* If this simplifies, use it. */
215 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
216 return tem;
218 return gen_rtx_fmt_e (code, mode, op);
221 /* Likewise for ternary operations. */
223 rtx
224 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
225 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
227 rtx tem;
229 /* If this simplifies, use it. */
230 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
231 op0, op1, op2)))
232 return tem;
234 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
237 /* Likewise, for relational operations.
238 CMP_MODE specifies the mode in which the comparison is done. */
240 rtx
241 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
242 enum machine_mode cmp_mode, rtx op0, rtx op1)
244 rtx tem;
246 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
247 op0, op1)))
248 return tem;
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
256 rtx
257 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
259 enum rtx_code code = GET_CODE (x);
260 enum machine_mode mode = GET_MODE (x);
261 enum machine_mode op_mode;
262 rtx op0, op1, op2;
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
268 if (x == old_rtx)
269 return new_rtx;
271 switch (GET_RTX_CLASS (code))
273 case RTX_UNARY:
274 op0 = XEXP (x, 0);
275 op_mode = GET_MODE (op0);
276 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
277 if (op0 == XEXP (x, 0))
278 return x;
279 return simplify_gen_unary (code, mode, op0, op_mode);
281 case RTX_BIN_ARITH:
282 case RTX_COMM_ARITH:
283 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_binary (code, mode, op0, op1);
289 case RTX_COMPARE:
290 case RTX_COMM_COMPARE:
291 op0 = XEXP (x, 0);
292 op1 = XEXP (x, 1);
293 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
297 return x;
298 return simplify_gen_relational (code, mode, op_mode, op0, op1);
300 case RTX_TERNARY:
301 case RTX_BITFIELD_OPS:
302 op0 = XEXP (x, 0);
303 op_mode = GET_MODE (op0);
304 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
305 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
306 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
307 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
308 return x;
309 if (op_mode == VOIDmode)
310 op_mode = GET_MODE (op0);
311 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
313 case RTX_EXTRA:
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
317 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
318 if (op0 == SUBREG_REG (x))
319 return x;
320 op0 = simplify_gen_subreg (GET_MODE (x), op0,
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 return op0 ? op0 : x;
325 break;
327 case RTX_OBJ:
328 if (code == MEM)
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 if (op0 == XEXP (x, 0))
332 return x;
333 return replace_equiv_address_nv (x, op0);
335 else if (code == LO_SUM)
337 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
338 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
342 return op1;
344 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
345 return x;
346 return gen_rtx_LO_SUM (mode, op0, op1);
348 else if (code == REG)
350 if (rtx_equal_p (x, old_rtx))
351 return new_rtx;
353 break;
355 default:
356 break;
358 return x;
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
364 rtx
365 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
366 rtx op, enum machine_mode op_mode)
368 rtx trueop, tem;
370 if (GET_CODE (op) == CONST)
371 op = XEXP (op, 0);
373 trueop = avoid_constant_pool_reference (op);
375 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
376 if (tem)
377 return tem;
379 return simplify_unary_operation_1 (code, mode, op);
382 /* Perform some simplifications we can do even if the operands
383 aren't constant. */
384 static rtx
385 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
387 enum rtx_code reversed;
388 rtx temp;
390 switch (code)
392 case NOT:
393 /* (not (not X)) == X. */
394 if (GET_CODE (op) == NOT)
395 return XEXP (op, 0);
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op)
400 && (mode == BImode || STORE_FLAG_VALUE == -1)
401 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
402 return simplify_gen_relational (reversed, mode, VOIDmode,
403 XEXP (op, 0), XEXP (op, 1));
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op) == PLUS
407 && XEXP (op, 1) == constm1_rtx)
408 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op) == NEG)
412 return plus_constant (XEXP (op, 0), -1);
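/* Both of the above follow from two's complement arithmetic:
   ~X == -X - 1, so ~(X + -1) == -X and ~(-X) == X - 1.  */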
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op) == XOR
416 && GET_CODE (XEXP (op, 1)) == CONST_INT
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op) == PLUS
423 && GET_CODE (XEXP (op, 1)) == CONST_INT
424 && mode_signbit_p (mode, XEXP (op, 1))
425 && (temp = simplify_unary_operation (NOT, mode,
426 XEXP (op, 1), mode)) != 0)
427 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
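/* This works because adding the sign bit is the same as XORing with it
   (the carry out of the top bit is discarded), and ~(X ^ C) == X ^ ~C.  */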
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
434 bother with. */
435 if (GET_CODE (op) == ASHIFT
436 && XEXP (op, 0) == const1_rtx)
438 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
439 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
454 if (GET_CODE (op) == SUBREG
455 && subreg_lowpart_p (op)
456 && (GET_MODE_SIZE (GET_MODE (op))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
458 && GET_CODE (SUBREG_REG (op)) == ASHIFT
459 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
461 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
462 rtx x;
464 x = gen_rtx_ROTATE (inner_mode,
465 simplify_gen_unary (NOT, inner_mode, const1_rtx,
466 inner_mode),
467 XEXP (SUBREG_REG (op), 1));
468 return rtl_hooks.gen_lowpart_no_emit (mode, x);
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
474 coded. */
476 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
478 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
479 enum machine_mode op_mode;
481 op_mode = GET_MODE (in1);
482 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
484 op_mode = GET_MODE (in2);
485 if (op_mode == VOIDmode)
486 op_mode = mode;
487 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
489 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
491 rtx tem = in2;
492 in2 = in1; in1 = tem;
495 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
496 mode, in1, in2);
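/* E.g. (not (ior A B)) becomes (and (not A) (not B)), and
   (not (and A B)) becomes (ior (not A) (not B)).  */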
498 break;
500 case NEG:
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op) == NEG)
503 return XEXP (op, 0);
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == const1_rtx)
508 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op) == NOT)
512 return plus_constant (XEXP (op, 0), 1);
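/* Again by two's complement arithmetic: -X == ~X + 1, so
   -(X + 1) == ~X and -(~X) == X + 1.  */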
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
522 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
524 if (GET_CODE (op) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op, 1)) == CONST_INT
530 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
532 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
533 if (temp)
534 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
553 is a constant). */
554 if (GET_CODE (op) == ASHIFT)
556 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
557 if (temp)
558 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op) == ASHIFTRT
564 && GET_CODE (XEXP (op, 1)) == CONST_INT
565 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
566 return simplify_gen_binary (LSHIFTRT, mode,
567 XEXP (op, 0), XEXP (op, 1));
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op) == LSHIFTRT
572 && GET_CODE (XEXP (op, 1)) == CONST_INT
573 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
574 return simplify_gen_binary (ASHIFTRT, mode,
575 XEXP (op, 0), XEXP (op, 1));
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op) == XOR
579 && XEXP (op, 1) == const1_rtx
580 && nonzero_bits (XEXP (op, 0), mode) == 1)
581 return plus_constant (XEXP (op, 0), -1);
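/* When A is 0 or 1, (xor A 1) is 1 - A, so its negation is A - 1,
   which is (plus A -1).  */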
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op) == LT
586 && XEXP (op, 1) == const0_rtx)
588 enum machine_mode inner = GET_MODE (XEXP (op, 0));
589 int isize = GET_MODE_BITSIZE (inner);
590 if (STORE_FLAG_VALUE == 1)
592 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
593 GEN_INT (isize - 1));
594 if (mode == inner)
595 return temp;
596 if (GET_MODE_BITSIZE (mode) > isize)
597 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
598 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
600 else if (STORE_FLAG_VALUE == -1)
602 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
603 GEN_INT (isize - 1));
604 if (mode == inner)
605 return temp;
606 if (GET_MODE_BITSIZE (mode) > isize)
607 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
608 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
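/* When STORE_FLAG_VALUE is 1, (lt X 0) is 0 or 1 and its negation is
   0 or -1, exactly X arithmetically shifted right by isize - 1 bits.
   When STORE_FLAG_VALUE is -1, (lt X 0) is 0 or -1 and its negation is
   0 or 1, which is the logical shift of X's sign bit into bit 0.  */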
611 break;
613 case TRUNCATE:
614 /* We can't handle truncation to a partial integer mode here
615 because we don't know the real bitsize of the partial
616 integer mode. */
617 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
618 break;
620 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
621 if ((GET_CODE (op) == SIGN_EXTEND
622 || GET_CODE (op) == ZERO_EXTEND)
623 && GET_MODE (XEXP (op, 0)) == mode)
624 return XEXP (op, 0);
626 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
627 (OP:SI foo:SI) if OP is NEG or ABS. */
628 if ((GET_CODE (op) == ABS
629 || GET_CODE (op) == NEG)
630 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
631 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
632 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
633 return simplify_gen_unary (GET_CODE (op), mode,
634 XEXP (XEXP (op, 0), 0), mode);
636 /* (truncate:A (subreg:B (truncate:C X) 0)) is
637 (truncate:A X). */
638 if (GET_CODE (op) == SUBREG
639 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
640 && subreg_lowpart_p (op))
641 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
642 GET_MODE (XEXP (SUBREG_REG (op), 0)));
644 /* If we know that the value is already truncated, we can
645 replace the TRUNCATE with a SUBREG. Note that this is also
646 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
647 modes; we just have to apply a different definition for
648 truncation. But don't do this for an (LSHIFTRT (MULT ...))
649 since this will cause problems with the umulXi3_highpart
650 patterns. */
651 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
652 GET_MODE_BITSIZE (GET_MODE (op)))
653 ? (num_sign_bit_copies (op, GET_MODE (op))
654 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
655 - GET_MODE_BITSIZE (mode)))
656 : truncated_to_mode (mode, op))
657 && ! (GET_CODE (op) == LSHIFTRT
658 && GET_CODE (XEXP (op, 0)) == MULT))
659 return rtl_hooks.gen_lowpart_no_emit (mode, op);
661 /* A truncate of a comparison can be replaced with a subreg if
662 STORE_FLAG_VALUE permits. This is like the previous test,
663 but it works even if the comparison is done in a mode larger
664 than HOST_BITS_PER_WIDE_INT. */
665 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
666 && COMPARISON_P (op)
667 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
669 break;
671 case FLOAT_TRUNCATE:
672 if (DECIMAL_FLOAT_MODE_P (mode))
673 break;
675 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
676 if (GET_CODE (op) == FLOAT_EXTEND
677 && GET_MODE (XEXP (op, 0)) == mode)
678 return XEXP (op, 0);
680 /* (float_truncate:SF (float_truncate:DF foo:XF))
681 = (float_truncate:SF foo:XF).
682 This may eliminate double rounding, so it is unsafe.
684 (float_truncate:SF (float_extend:XF foo:DF))
685 = (float_truncate:SF foo:DF).
687 (float_truncate:DF (float_extend:XF foo:SF))
688 = (float_extend:DF foo:SF). */
689 if ((GET_CODE (op) == FLOAT_TRUNCATE
690 && flag_unsafe_math_optimizations)
691 || GET_CODE (op) == FLOAT_EXTEND)
692 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
693 0)))
694 > GET_MODE_SIZE (mode)
695 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
696 mode,
697 XEXP (op, 0), mode);
699 /* (float_truncate (float x)) is (float x) */
700 if (GET_CODE (op) == FLOAT
701 && (flag_unsafe_math_optimizations
702 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
703 && ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0))))))))
707 return simplify_gen_unary (FLOAT, mode,
708 XEXP (op, 0),
709 GET_MODE (XEXP (op, 0)));
711 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
712 (OP:SF foo:SF) if OP is NEG or ABS. */
713 if ((GET_CODE (op) == ABS
714 || GET_CODE (op) == NEG)
715 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
717 return simplify_gen_unary (GET_CODE (op), mode,
718 XEXP (XEXP (op, 0), 0), mode);
720 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
721 is (float_truncate:SF x). */
722 if (GET_CODE (op) == SUBREG
723 && subreg_lowpart_p (op)
724 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
725 return SUBREG_REG (op);
726 break;
728 case FLOAT_EXTEND:
729 if (DECIMAL_FLOAT_MODE_P (mode))
730 break;
732 /* (float_extend (float_extend x)) is (float_extend x)
734 (float_extend (float x)) is (float x) assuming that double
735 rounding can't happen.
736 */
737 if (GET_CODE (op) == FLOAT_EXTEND
738 || (GET_CODE (op) == FLOAT
739 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
740 && ((unsigned)significand_size (GET_MODE (op))
741 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
742 - num_sign_bit_copies (XEXP (op, 0),
743 GET_MODE (XEXP (op, 0)))))))
744 return simplify_gen_unary (GET_CODE (op), mode,
745 XEXP (op, 0),
746 GET_MODE (XEXP (op, 0)));
748 break;
750 case ABS:
751 /* (abs (neg <foo>)) -> (abs <foo>) */
752 if (GET_CODE (op) == NEG)
753 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
754 GET_MODE (XEXP (op, 0)));
756 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
757 do nothing. */
758 if (GET_MODE (op) == VOIDmode)
759 break;
761 /* If operand is something known to be positive, ignore the ABS. */
762 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
763 || ((GET_MODE_BITSIZE (GET_MODE (op))
764 <= HOST_BITS_PER_WIDE_INT)
765 && ((nonzero_bits (op, GET_MODE (op))
766 & ((HOST_WIDE_INT) 1
767 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
768 == 0)))
769 return op;
771 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
772 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
773 return gen_rtx_NEG (mode, op);
775 break;
777 case FFS:
778 /* (ffs (*_extend <X>)) = (ffs <X>) */
779 if (GET_CODE (op) == SIGN_EXTEND
780 || GET_CODE (op) == ZERO_EXTEND)
781 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
782 GET_MODE (XEXP (op, 0)));
783 break;
785 case POPCOUNT:
786 switch (GET_CODE (op))
788 case BSWAP:
789 case ZERO_EXTEND:
790 /* (popcount (zero_extend <X>)) = (popcount <X>) */
791 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
792 GET_MODE (XEXP (op, 0)));
794 case ROTATE:
795 case ROTATERT:
796 /* Rotations don't affect popcount. */
797 if (!side_effects_p (XEXP (op, 1)))
798 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
799 GET_MODE (XEXP (op, 0)));
800 break;
802 default:
803 break;
805 break;
807 case PARITY:
808 switch (GET_CODE (op))
810 case NOT:
811 case BSWAP:
812 case ZERO_EXTEND:
813 case SIGN_EXTEND:
814 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
815 GET_MODE (XEXP (op, 0)));
817 case ROTATE:
818 case ROTATERT:
819 /* Rotations don't affect parity. */
820 if (!side_effects_p (XEXP (op, 1)))
821 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
823 break;
825 default:
826 break;
828 break;
830 case BSWAP:
831 /* (bswap (bswap x)) -> x. */
832 if (GET_CODE (op) == BSWAP)
833 return XEXP (op, 0);
834 break;
836 case FLOAT:
837 /* (float (sign_extend <X>)) = (float <X>). */
838 if (GET_CODE (op) == SIGN_EXTEND)
839 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
840 GET_MODE (XEXP (op, 0)));
841 break;
843 case SIGN_EXTEND:
844 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
845 becomes just the MINUS if its mode is MODE. This allows
846 folding switch statements on machines using casesi (such as
847 the VAX). */
848 if (GET_CODE (op) == TRUNCATE
849 && GET_MODE (XEXP (op, 0)) == mode
850 && GET_CODE (XEXP (op, 0)) == MINUS
851 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
852 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
853 return XEXP (op, 0);
855 /* Check for a sign extension of a subreg of a promoted
856 variable, where the promotion is sign-extended, and the
857 target mode is the same as the variable's promotion. */
858 if (GET_CODE (op) == SUBREG
859 && SUBREG_PROMOTED_VAR_P (op)
860 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
861 && GET_MODE (XEXP (op, 0)) == mode)
862 return XEXP (op, 0);
864 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
865 if (! POINTERS_EXTEND_UNSIGNED
866 && mode == Pmode && GET_MODE (op) == ptr_mode
867 && (CONSTANT_P (op)
868 || (GET_CODE (op) == SUBREG
869 && REG_P (SUBREG_REG (op))
870 && REG_POINTER (SUBREG_REG (op))
871 && GET_MODE (SUBREG_REG (op)) == Pmode)))
872 return convert_memory_address (Pmode, op);
873 #endif
874 break;
876 case ZERO_EXTEND:
877 /* Check for a zero extension of a subreg of a promoted
878 variable, where the promotion is zero-extended, and the
879 target mode is the same as the variable's promotion. */
880 if (GET_CODE (op) == SUBREG
881 && SUBREG_PROMOTED_VAR_P (op)
882 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
883 && GET_MODE (XEXP (op, 0)) == mode)
884 return XEXP (op, 0);
886 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
887 if (POINTERS_EXTEND_UNSIGNED > 0
888 && mode == Pmode && GET_MODE (op) == ptr_mode
889 && (CONSTANT_P (op)
890 || (GET_CODE (op) == SUBREG
891 && REG_P (SUBREG_REG (op))
892 && REG_POINTER (SUBREG_REG (op))
893 && GET_MODE (SUBREG_REG (op)) == Pmode)))
894 return convert_memory_address (Pmode, op);
895 #endif
896 break;
898 default:
899 break;
902 return 0;
905 /* Try to compute the value of a unary operation CODE whose output mode is to
906 be MODE with input operand OP whose mode was originally OP_MODE.
907 Return zero if the value cannot be computed. */
908 rtx
909 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
910 rtx op, enum machine_mode op_mode)
912 unsigned int width = GET_MODE_BITSIZE (mode);
914 if (code == VEC_DUPLICATE)
916 gcc_assert (VECTOR_MODE_P (mode));
917 if (GET_MODE (op) != VOIDmode)
919 if (!VECTOR_MODE_P (GET_MODE (op)))
920 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
921 else
922 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
923 (GET_MODE (op)));
925 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
926 || GET_CODE (op) == CONST_VECTOR)
928 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
929 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
930 rtvec v = rtvec_alloc (n_elts);
931 unsigned int i;
933 if (GET_CODE (op) != CONST_VECTOR)
934 for (i = 0; i < n_elts; i++)
935 RTVEC_ELT (v, i) = op;
936 else
938 enum machine_mode inmode = GET_MODE (op);
939 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
940 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
942 gcc_assert (in_n_elts < n_elts);
943 gcc_assert ((n_elts % in_n_elts) == 0);
944 for (i = 0; i < n_elts; i++)
945 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
947 return gen_rtx_CONST_VECTOR (mode, v);
951 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
953 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
954 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
955 enum machine_mode opmode = GET_MODE (op);
956 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
957 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
958 rtvec v = rtvec_alloc (n_elts);
959 unsigned int i;
961 gcc_assert (op_n_elts == n_elts);
962 for (i = 0; i < n_elts; i++)
964 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
965 CONST_VECTOR_ELT (op, i),
966 GET_MODE_INNER (opmode));
967 if (!x)
968 return 0;
969 RTVEC_ELT (v, i) = x;
971 return gen_rtx_CONST_VECTOR (mode, v);
974 /* The order of these tests is critical so that, for example, we don't
975 check the wrong mode (input vs. output) for a conversion operation,
976 such as FIX. At some point, this should be simplified. */
978 if (code == FLOAT && GET_MODE (op) == VOIDmode
979 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
981 HOST_WIDE_INT hv, lv;
982 REAL_VALUE_TYPE d;
984 if (GET_CODE (op) == CONST_INT)
985 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
986 else
987 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
989 REAL_VALUE_FROM_INT (d, lv, hv, mode);
990 d = real_value_truncate (mode, d);
991 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
993 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
994 && (GET_CODE (op) == CONST_DOUBLE
995 || GET_CODE (op) == CONST_INT))
997 HOST_WIDE_INT hv, lv;
998 REAL_VALUE_TYPE d;
1000 if (GET_CODE (op) == CONST_INT)
1001 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1002 else
1003 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1005 if (op_mode == VOIDmode)
1007 /* We don't know how to interpret negative-looking numbers in
1008 this case, so don't try to fold those. */
1009 if (hv < 0)
1010 return 0;
1012 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1013 ;
1014 else
1015 hv = 0, lv &= GET_MODE_MASK (op_mode);
1017 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1018 d = real_value_truncate (mode, d);
1019 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1022 if (GET_CODE (op) == CONST_INT
1023 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1025 HOST_WIDE_INT arg0 = INTVAL (op);
1026 HOST_WIDE_INT val;
1028 switch (code)
1030 case NOT:
1031 val = ~ arg0;
1032 break;
1034 case NEG:
1035 val = - arg0;
1036 break;
1038 case ABS:
1039 val = (arg0 >= 0 ? arg0 : - arg0);
1040 break;
1042 case FFS:
1043 /* Don't use ffs here. Instead, get low order bit and then its
1044 number. If arg0 is zero, this will return 0, as desired. */
1045 arg0 &= GET_MODE_MASK (mode);
1046 val = exact_log2 (arg0 & (- arg0)) + 1;
1047 break;
1049 case CLZ:
1050 arg0 &= GET_MODE_MASK (mode);
1051 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1052 ;
1053 else
1054 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1055 break;
1057 case CTZ:
1058 arg0 &= GET_MODE_MASK (mode);
1059 if (arg0 == 0)
1061 /* Even if the value at zero is undefined, we have to come
1062 up with some replacement. Seems good enough. */
1063 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1064 val = GET_MODE_BITSIZE (mode);
1066 else
1067 val = exact_log2 (arg0 & -arg0);
1068 break;
1070 case POPCOUNT:
1071 arg0 &= GET_MODE_MASK (mode);
1072 val = 0;
1073 while (arg0)
1074 val++, arg0 &= arg0 - 1;
1075 break;
1077 case PARITY:
1078 arg0 &= GET_MODE_MASK (mode);
1079 val = 0;
1080 while (arg0)
1081 val++, arg0 &= arg0 - 1;
1082 val &= 1;
1083 break;
1085 case BSWAP:
1087 unsigned int s;
1089 val = 0;
1090 for (s = 0; s < width; s += 8)
1092 unsigned int d = width - s - 8;
1093 unsigned HOST_WIDE_INT byte;
1094 byte = (arg0 >> s) & 0xff;
1095 val |= byte << d;
1098 break;
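/* For example, byte-swapping the SImode constant 0x12345678 yields
   0x78563412.  */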
1100 case TRUNCATE:
1101 val = arg0;
1102 break;
1104 case ZERO_EXTEND:
1105 /* When zero-extending a CONST_INT, we need to know its
1106 original mode. */
1107 gcc_assert (op_mode != VOIDmode);
1108 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1110 /* If we were really extending the mode,
1111 we would have to distinguish between zero-extension
1112 and sign-extension. */
1113 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1114 val = arg0;
1116 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1117 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1118 else
1119 return 0;
1120 break;
1122 case SIGN_EXTEND:
1123 if (op_mode == VOIDmode)
1124 op_mode = mode;
1125 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1127 /* If we were really extending the mode,
1128 we would have to distinguish between zero-extension
1129 and sign-extension. */
1130 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1131 val = arg0;
1133 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1135 val
1136 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1137 if (val
1138 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1139 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1141 else
1142 return 0;
1143 break;
1145 case SQRT:
1146 case FLOAT_EXTEND:
1147 case FLOAT_TRUNCATE:
1148 case SS_TRUNCATE:
1149 case US_TRUNCATE:
1150 case SS_NEG:
1151 return 0;
1153 default:
1154 gcc_unreachable ();
1157 return gen_int_mode (val, mode);
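/* For example, zero-extending (const_int -1) from QImode gives
   (const_int 255), and (popcount:SI (const_int 255)) folds to
   (const_int 8).  */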
1160 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1161 for a DImode operation on a CONST_INT. */
1162 else if (GET_MODE (op) == VOIDmode
1163 && width <= HOST_BITS_PER_WIDE_INT * 2
1164 && (GET_CODE (op) == CONST_DOUBLE
1165 || GET_CODE (op) == CONST_INT))
1167 unsigned HOST_WIDE_INT l1, lv;
1168 HOST_WIDE_INT h1, hv;
1170 if (GET_CODE (op) == CONST_DOUBLE)
1171 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1172 else
1173 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1175 switch (code)
1177 case NOT:
1178 lv = ~ l1;
1179 hv = ~ h1;
1180 break;
1182 case NEG:
1183 neg_double (l1, h1, &lv, &hv);
1184 break;
1186 case ABS:
1187 if (h1 < 0)
1188 neg_double (l1, h1, &lv, &hv);
1189 else
1190 lv = l1, hv = h1;
1191 break;
1193 case FFS:
1194 hv = 0;
1195 if (l1 == 0)
1197 if (h1 == 0)
1198 lv = 0;
1199 else
1200 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1202 else
1203 lv = exact_log2 (l1 & -l1) + 1;
1204 break;
1206 case CLZ:
1207 hv = 0;
1208 if (h1 != 0)
1209 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1210 - HOST_BITS_PER_WIDE_INT;
1211 else if (l1 != 0)
1212 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1213 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1214 lv = GET_MODE_BITSIZE (mode);
1215 break;
1217 case CTZ:
1218 hv = 0;
1219 if (l1 != 0)
1220 lv = exact_log2 (l1 & -l1);
1221 else if (h1 != 0)
1222 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1223 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1224 lv = GET_MODE_BITSIZE (mode);
1225 break;
1227 case POPCOUNT:
1228 hv = 0;
1229 lv = 0;
1230 while (l1)
1231 lv++, l1 &= l1 - 1;
1232 while (h1)
1233 lv++, h1 &= h1 - 1;
1234 break;
1236 case PARITY:
1237 hv = 0;
1238 lv = 0;
1239 while (l1)
1240 lv++, l1 &= l1 - 1;
1241 while (h1)
1242 lv++, h1 &= h1 - 1;
1243 lv &= 1;
1244 break;
1246 case BSWAP:
1248 unsigned int s;
1250 hv = 0;
1251 lv = 0;
1252 for (s = 0; s < width; s += 8)
1254 unsigned int d = width - s - 8;
1255 unsigned HOST_WIDE_INT byte;
1257 if (s < HOST_BITS_PER_WIDE_INT)
1258 byte = (l1 >> s) & 0xff;
1259 else
1260 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1262 if (d < HOST_BITS_PER_WIDE_INT)
1263 lv |= byte << d;
1264 else
1265 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1268 break;
1270 case TRUNCATE:
1271 /* This is just a change-of-mode, so do nothing. */
1272 lv = l1, hv = h1;
1273 break;
1275 case ZERO_EXTEND:
1276 gcc_assert (op_mode != VOIDmode);
1278 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1279 return 0;
1281 hv = 0;
1282 lv = l1 & GET_MODE_MASK (op_mode);
1283 break;
1285 case SIGN_EXTEND:
1286 if (op_mode == VOIDmode
1287 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1288 return 0;
1289 else
1291 lv = l1 & GET_MODE_MASK (op_mode);
1292 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1293 && (lv & ((HOST_WIDE_INT) 1
1294 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1295 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1297 hv = HWI_SIGN_EXTEND (lv);
1299 break;
1301 case SQRT:
1302 return 0;
1304 default:
1305 return 0;
1308 return immed_double_const (lv, hv, mode);
1311 else if (GET_CODE (op) == CONST_DOUBLE
1312 && SCALAR_FLOAT_MODE_P (mode))
1314 REAL_VALUE_TYPE d, t;
1315 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1317 switch (code)
1319 case SQRT:
1320 if (HONOR_SNANS (mode) && real_isnan (&d))
1321 return 0;
1322 real_sqrt (&t, mode, &d);
1323 d = t;
1324 break;
1325 case ABS:
1326 d = REAL_VALUE_ABS (d);
1327 break;
1328 case NEG:
1329 d = REAL_VALUE_NEGATE (d);
1330 break;
1331 case FLOAT_TRUNCATE:
1332 d = real_value_truncate (mode, d);
1333 break;
1334 case FLOAT_EXTEND:
1335 /* All this does is change the mode. */
1336 break;
1337 case FIX:
1338 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1339 break;
1340 case NOT:
1342 long tmp[4];
1343 int i;
1345 real_to_target (tmp, &d, GET_MODE (op));
1346 for (i = 0; i < 4; i++)
1347 tmp[i] = ~tmp[i];
1348 real_from_target (&d, tmp, mode);
1349 break;
1351 default:
1352 gcc_unreachable ();
1354 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1357 else if (GET_CODE (op) == CONST_DOUBLE
1358 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1359 && GET_MODE_CLASS (mode) == MODE_INT
1360 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1362 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1363 operators are intentionally left unspecified (to ease implementation
1364 by target backends), for consistency, this routine implements the
1365 same semantics for constant folding as used by the middle-end. */
1367 /* This was formerly used only for non-IEEE float.
1368 eggert@twinsun.com says it is safe for IEEE also. */
1369 HOST_WIDE_INT xh, xl, th, tl;
1370 REAL_VALUE_TYPE x, t;
1371 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1372 switch (code)
1374 case FIX:
1375 if (REAL_VALUE_ISNAN (x))
1376 return const0_rtx;
1378 /* Test against the signed upper bound. */
1379 if (width > HOST_BITS_PER_WIDE_INT)
1381 th = ((unsigned HOST_WIDE_INT) 1
1382 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1383 tl = -1;
1385 else
1387 th = 0;
1388 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1390 real_from_integer (&t, VOIDmode, tl, th, 0);
1391 if (REAL_VALUES_LESS (t, x))
1393 xh = th;
1394 xl = tl;
1395 break;
1398 /* Test against the signed lower bound. */
1399 if (width > HOST_BITS_PER_WIDE_INT)
1401 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1402 tl = 0;
1404 else
1406 th = -1;
1407 tl = (HOST_WIDE_INT) -1 << (width - 1);
1409 real_from_integer (&t, VOIDmode, tl, th, 0);
1410 if (REAL_VALUES_LESS (x, t))
1412 xh = th;
1413 xl = tl;
1414 break;
1416 REAL_VALUE_TO_INT (&xl, &xh, x);
1417 break;
1419 case UNSIGNED_FIX:
1420 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1421 return const0_rtx;
1423 /* Test against the unsigned upper bound. */
1424 if (width == 2*HOST_BITS_PER_WIDE_INT)
1426 th = -1;
1427 tl = -1;
1429 else if (width >= HOST_BITS_PER_WIDE_INT)
1431 th = ((unsigned HOST_WIDE_INT) 1
1432 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1433 tl = -1;
1435 else
1437 th = 0;
1438 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1440 real_from_integer (&t, VOIDmode, tl, th, 1);
1441 if (REAL_VALUES_LESS (t, x))
1443 xh = th;
1444 xl = tl;
1445 break;
1448 REAL_VALUE_TO_INT (&xl, &xh, x);
1449 break;
1451 default:
1452 gcc_unreachable ();
1454 return immed_double_const (xl, xh, mode);
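/* For example, (fix:SI (const_double 3.0e10)) saturates to
   (const_int 2147483647), and (unsigned_fix:SI (const_double -1.5))
   folds to (const_int 0), matching the middle-end behaviour described
   above.  */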
1457 return NULL_RTX;
1460 /* Subroutine of simplify_binary_operation to simplify a commutative,
1461 associative binary operation CODE with result mode MODE, operating
1462 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1463 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1464 canonicalization is possible. */
1466 static rtx
1467 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1468 rtx op0, rtx op1)
1470 rtx tem;
1472 /* Linearize the operator to the left. */
1473 if (GET_CODE (op1) == code)
1475 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1476 if (GET_CODE (op0) == code)
1478 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1479 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1482 /* "a op (b op c)" becomes "(b op c) op a". */
1483 if (! swap_commutative_operands_p (op1, op0))
1484 return simplify_gen_binary (code, mode, op1, op0);
1486 tem = op0;
1487 op0 = op1;
1488 op1 = tem;
1491 if (GET_CODE (op0) == code)
1493 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1494 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1496 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1497 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1500 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1501 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1502 if (tem != 0)
1503 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1505 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1506 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1507 if (tem != 0)
1508 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1511 return 0;
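/* For instance, (plus (plus X (const_int 4)) (const_int 3)) is
   reassociated here into (plus X (const_int 7)) by the
   "(a op b) op c" -> "a op (b op c)" step.  */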
1515 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1516 and OP1. Return 0 if no simplification is possible.
1518 Don't use this for relational operations such as EQ or LT.
1519 Use simplify_relational_operation instead. */
1520 rtx
1521 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1522 rtx op0, rtx op1)
1524 rtx trueop0, trueop1;
1525 rtx tem;
1527 /* Relational operations don't work here. We must know the mode
1528 of the operands in order to do the comparison correctly.
1529 Assuming a full word can give incorrect results.
1530 Consider comparing 128 with -128 in QImode. */
1531 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1532 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1534 /* Make sure the constant is second. */
1535 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1536 && swap_commutative_operands_p (op0, op1))
1538 tem = op0, op0 = op1, op1 = tem;
1541 trueop0 = avoid_constant_pool_reference (op0);
1542 trueop1 = avoid_constant_pool_reference (op1);
1544 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1545 if (tem)
1546 return tem;
1547 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1550 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1551 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1552 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1553 actual constants. */
1555 static rtx
1556 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1557 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1559 rtx tem, reversed, opleft, opright;
1560 HOST_WIDE_INT val;
1561 unsigned int width = GET_MODE_BITSIZE (mode);
1563 /* Even if we can't compute a constant result,
1564 there are some cases worth simplifying. */
1566 switch (code)
1568 case PLUS:
1569 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1570 when x is NaN, infinite, or finite and nonzero. They aren't
1571 when x is -0 and the rounding mode is not towards -infinity,
1572 since (-0) + 0 is then 0. */
1573 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1574 return op0;
1576 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1577 transformations are safe even for IEEE. */
1578 if (GET_CODE (op0) == NEG)
1579 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1580 else if (GET_CODE (op1) == NEG)
1581 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1583 /* (~a) + 1 -> -a */
1584 if (INTEGRAL_MODE_P (mode)
1585 && GET_CODE (op0) == NOT
1586 && trueop1 == const1_rtx)
1587 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1589 /* Handle both-operands-constant cases. We can only add
1590 CONST_INTs to constants since the sum of relocatable symbols
1591 can't be handled by most assemblers. Don't add CONST_INT
1592 to CONST_INT since overflow won't be computed properly if wider
1593 than HOST_BITS_PER_WIDE_INT. */
1595 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1596 && GET_CODE (op1) == CONST_INT)
1597 return plus_constant (op0, INTVAL (op1));
1598 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1599 && GET_CODE (op0) == CONST_INT)
1600 return plus_constant (op1, INTVAL (op0));
1602 /* See if this is something like X * C - X or vice versa or
1603 if the multiplication is written as a shift. If so, we can
1604 distribute and make a new multiply, shift, or maybe just
1605 have X (if C is 2 in the example above). But don't make
1606 something more expensive than we had before. */
1608 if (SCALAR_INT_MODE_P (mode))
1610 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1611 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1612 rtx lhs = op0, rhs = op1;
1614 if (GET_CODE (lhs) == NEG)
1616 coeff0l = -1;
1617 coeff0h = -1;
1618 lhs = XEXP (lhs, 0);
1620 else if (GET_CODE (lhs) == MULT
1621 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1623 coeff0l = INTVAL (XEXP (lhs, 1));
1624 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1625 lhs = XEXP (lhs, 0);
1627 else if (GET_CODE (lhs) == ASHIFT
1628 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1629 && INTVAL (XEXP (lhs, 1)) >= 0
1630 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1632 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1633 coeff0h = 0;
1634 lhs = XEXP (lhs, 0);
1637 if (GET_CODE (rhs) == NEG)
1639 coeff1l = -1;
1640 coeff1h = -1;
1641 rhs = XEXP (rhs, 0);
1643 else if (GET_CODE (rhs) == MULT
1644 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1646 coeff1l = INTVAL (XEXP (rhs, 1));
1647 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1648 rhs = XEXP (rhs, 0);
1650 else if (GET_CODE (rhs) == ASHIFT
1651 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1652 && INTVAL (XEXP (rhs, 1)) >= 0
1653 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1655 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1656 coeff1h = 0;
1657 rhs = XEXP (rhs, 0);
1660 if (rtx_equal_p (lhs, rhs))
1662 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1663 rtx coeff;
1664 unsigned HOST_WIDE_INT l;
1665 HOST_WIDE_INT h;
1667 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1668 coeff = immed_double_const (l, h, mode);
1670 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1671 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1672 ? tem : 0;
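/* Thus (plus (mult X (const_int 3)) X) becomes (mult X (const_int 4)),
   provided the new multiply is not more expensive than the original
   expression.  */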
1676 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1677 if ((GET_CODE (op1) == CONST_INT
1678 || GET_CODE (op1) == CONST_DOUBLE)
1679 && GET_CODE (op0) == XOR
1680 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1681 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1682 && mode_signbit_p (mode, op1))
1683 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1684 simplify_gen_binary (XOR, mode, op1,
1685 XEXP (op0, 1)));
1687 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1688 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1689 && GET_CODE (op0) == MULT
1690 && GET_CODE (XEXP (op0, 0)) == NEG)
1692 rtx in1, in2;
1694 in1 = XEXP (XEXP (op0, 0), 0);
1695 in2 = XEXP (op0, 1);
1696 return simplify_gen_binary (MINUS, mode, op1,
1697 simplify_gen_binary (MULT, mode,
1698 in1, in2));
1701 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1702 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1703 is 1. */
1704 if (COMPARISON_P (op0)
1705 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1706 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1707 && (reversed = reversed_comparison (op0, mode)))
1708 return
1709 simplify_gen_unary (NEG, mode, reversed, mode);
1711 /* If one of the operands is a PLUS or a MINUS, see if we can
1712 simplify this by the associative law.
1713 Don't use the associative law for floating point.
1714 The inaccuracy makes it nonassociative,
1715 and subtle programs can break if operations are associated. */
1717 if (INTEGRAL_MODE_P (mode)
1718 && (plus_minus_operand_p (op0)
1719 || plus_minus_operand_p (op1))
1720 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1721 return tem;
1723 /* Reassociate floating point addition only when the user
1724 specifies unsafe math optimizations. */
1725 if (FLOAT_MODE_P (mode)
1726 && flag_unsafe_math_optimizations)
1728 tem = simplify_associative_operation (code, mode, op0, op1);
1729 if (tem)
1730 return tem;
1732 break;
1734 case COMPARE:
1735 #ifdef HAVE_cc0
1736 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1737 using cc0, in which case we want to leave it as a COMPARE
1738 so we can distinguish it from a register-register-copy.
1740 In IEEE floating point, x-0 is not the same as x. */
1742 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1743 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1744 && trueop1 == CONST0_RTX (mode))
1745 return op0;
1746 #endif
1748 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1749 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1750 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1751 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1753 rtx xop00 = XEXP (op0, 0);
1754 rtx xop10 = XEXP (op1, 0);
1756 #ifdef HAVE_cc0
1757 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1758 #else
1759 if (REG_P (xop00) && REG_P (xop10)
1760 && GET_MODE (xop00) == GET_MODE (xop10)
1761 && REGNO (xop00) == REGNO (xop10)
1762 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1763 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1764 #endif
1765 return xop00;
1767 break;
1769 case MINUS:
1770 /* We can't assume x-x is 0 even with non-IEEE floating point,
1771 but since it is zero except in very strange circumstances, we
1772 will treat it as zero with -ffinite-math-only. */
1773 if (rtx_equal_p (trueop0, trueop1)
1774 && ! side_effects_p (op0)
1775 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1776 return CONST0_RTX (mode);
1778 /* Change subtraction from zero into negation. (0 - x) is the
1779 same as -x when x is NaN, infinite, or finite and nonzero.
1780 But if the mode has signed zeros, and does not round towards
1781 -infinity, then 0 - 0 is 0, not -0. */
1782 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1783 return simplify_gen_unary (NEG, mode, op1, mode);
1785 /* (-1 - a) is ~a. */
1786 if (trueop0 == constm1_rtx)
1787 return simplify_gen_unary (NOT, mode, op1, mode);
1789 /* Subtracting 0 has no effect unless the mode has signed zeros
1790 and supports rounding towards -infinity. In such a case,
1791 0 - 0 is -0. */
1792 if (!(HONOR_SIGNED_ZEROS (mode)
1793 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1794 && trueop1 == CONST0_RTX (mode))
1795 return op0;
1797 /* See if this is something like X * C - X or vice versa or
1798 if the multiplication is written as a shift. If so, we can
1799 distribute and make a new multiply, shift, or maybe just
1800 have X (if C is 2 in the example above). But don't make
1801 something more expensive than we had before. */
1803 if (SCALAR_INT_MODE_P (mode))
1805 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1806 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1807 rtx lhs = op0, rhs = op1;
1809 if (GET_CODE (lhs) == NEG)
1811 coeff0l = -1;
1812 coeff0h = -1;
1813 lhs = XEXP (lhs, 0);
1815 else if (GET_CODE (lhs) == MULT
1816 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1818 coeff0l = INTVAL (XEXP (lhs, 1));
1819 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1820 lhs = XEXP (lhs, 0);
1822 else if (GET_CODE (lhs) == ASHIFT
1823 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1824 && INTVAL (XEXP (lhs, 1)) >= 0
1825 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1827 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1828 coeff0h = 0;
1829 lhs = XEXP (lhs, 0);
1832 if (GET_CODE (rhs) == NEG)
1834 negcoeff1l = 1;
1835 negcoeff1h = 0;
1836 rhs = XEXP (rhs, 0);
1838 else if (GET_CODE (rhs) == MULT
1839 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1841 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1842 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1843 rhs = XEXP (rhs, 0);
1845 else if (GET_CODE (rhs) == ASHIFT
1846 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1847 && INTVAL (XEXP (rhs, 1)) >= 0
1848 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1850 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1851 negcoeff1h = -1;
1852 rhs = XEXP (rhs, 0);
1855 if (rtx_equal_p (lhs, rhs))
1857 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1858 rtx coeff;
1859 unsigned HOST_WIDE_INT l;
1860 HOST_WIDE_INT h;
1862 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1863 coeff = immed_double_const (l, h, mode);
1865 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1866 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1867 ? tem : 0;
1871 /* (a - (-b)) -> (a + b). True even for IEEE. */
1872 if (GET_CODE (op1) == NEG)
1873 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1875 /* (-x - c) may be simplified as (-c - x). */
1876 if (GET_CODE (op0) == NEG
1877 && (GET_CODE (op1) == CONST_INT
1878 || GET_CODE (op1) == CONST_DOUBLE))
1880 tem = simplify_unary_operation (NEG, mode, op1, mode);
1881 if (tem)
1882 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1885 /* Don't let a relocatable value get a negative coeff. */
1886 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1887 return simplify_gen_binary (PLUS, mode,
1888 op0,
1889 neg_const_int (mode, op1));
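/* E.g. (minus (symbol_ref S) (const_int 4)) becomes
   (plus (symbol_ref S) (const_int -4)).  */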
1891 /* (x - (x & y)) -> (x & ~y) */
1892 if (GET_CODE (op1) == AND)
1894 if (rtx_equal_p (op0, XEXP (op1, 0)))
1896 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1897 GET_MODE (XEXP (op1, 1)));
1898 return simplify_gen_binary (AND, mode, op0, tem);
1900 if (rtx_equal_p (op0, XEXP (op1, 1)))
1902 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1903 GET_MODE (XEXP (op1, 0)));
1904 return simplify_gen_binary (AND, mode, op0, tem);
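/* This holds because (x & y) only contains bits that are set in x,
   so subtracting it from x just clears those bits, i.e. x & ~y.  */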
1908 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1909 by reversing the comparison code if valid. */
1910 if (STORE_FLAG_VALUE == 1
1911 && trueop0 == const1_rtx
1912 && COMPARISON_P (op1)
1913 && (reversed = reversed_comparison (op1, mode)))
1914 return reversed;
1916 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1917 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1918 && GET_CODE (op1) == MULT
1919 && GET_CODE (XEXP (op1, 0)) == NEG)
1921 rtx in1, in2;
1923 in1 = XEXP (XEXP (op1, 0), 0);
1924 in2 = XEXP (op1, 1);
1925 return simplify_gen_binary (PLUS, mode,
1926 simplify_gen_binary (MULT, mode,
1927 in1, in2),
1928 op0);
1931 /* Canonicalize (minus (neg A) (mult B C)) to
1932 (minus (mult (neg B) C) A). */
1933 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1934 && GET_CODE (op1) == MULT
1935 && GET_CODE (op0) == NEG)
1937 rtx in1, in2;
1939 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1940 in2 = XEXP (op1, 1);
1941 return simplify_gen_binary (MINUS, mode,
1942 simplify_gen_binary (MULT, mode,
1943 in1, in2),
1944 XEXP (op0, 0));
1947 /* If one of the operands is a PLUS or a MINUS, see if we can
1948 simplify this by the associative law. This will, for example,
1949 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1950 Don't use the associative law for floating point.
1951 The inaccuracy makes it nonassociative,
1952 and subtle programs can break if operations are associated. */
1954 if (INTEGRAL_MODE_P (mode)
1955 && (plus_minus_operand_p (op0)
1956 || plus_minus_operand_p (op1))
1957 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1958 return tem;
1959 break;
1961 case MULT:
1962 if (trueop1 == constm1_rtx)
1963 return simplify_gen_unary (NEG, mode, op0, mode);
1965 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1966 x is NaN, since x * 0 is then also NaN. Nor is it valid
1967 when the mode has signed zeros, since multiplying a negative
1968 number by 0 will give -0, not 0. */
1969 if (!HONOR_NANS (mode)
1970 && !HONOR_SIGNED_ZEROS (mode)
1971 && trueop1 == CONST0_RTX (mode)
1972 && ! side_effects_p (op0))
1973 return op1;
1975 /* In IEEE floating point, x*1 is not equivalent to x for
1976 signalling NaNs. */
1977 if (!HONOR_SNANS (mode)
1978 && trueop1 == CONST1_RTX (mode))
1979 return op0;
1981 /* Convert multiply by constant power of two into shift unless
1982 we are still generating RTL. This test is a kludge. */
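/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)). */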
1983 if (GET_CODE (trueop1) == CONST_INT
1984 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1985 /* If the mode is larger than the host word size, and the
1986 uppermost bit is set, then this isn't a power of two due
1987 to implicit sign extension. */
1988 && (width <= HOST_BITS_PER_WIDE_INT
1989 || val != HOST_BITS_PER_WIDE_INT - 1))
1990 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1992 /* Likewise for multipliers wider than a word. */
1993 if (GET_CODE (trueop1) == CONST_DOUBLE
1994 && (GET_MODE (trueop1) == VOIDmode
1995 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1996 && GET_MODE (op0) == mode
1997 && CONST_DOUBLE_LOW (trueop1) == 0
1998 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1999 return simplify_gen_binary (ASHIFT, mode, op0,
2000 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2002 /* x*2 is x+x and x*(-1) is -x */
2003 if (GET_CODE (trueop1) == CONST_DOUBLE
2004 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2005 && GET_MODE (op0) == mode)
2007 REAL_VALUE_TYPE d;
2008 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2010 if (REAL_VALUES_EQUAL (d, dconst2))
2011 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2013 if (!HONOR_SNANS (mode)
2014 && REAL_VALUES_EQUAL (d, dconstm1))
2015 return simplify_gen_unary (NEG, mode, op0, mode);
2018 /* Optimize -x * -x as x * x. */
2019 if (FLOAT_MODE_P (mode)
2020 && GET_CODE (op0) == NEG
2021 && GET_CODE (op1) == NEG
2022 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2023 && !side_effects_p (XEXP (op0, 0)))
2024 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2026 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2027 if (SCALAR_FLOAT_MODE_P (mode)
2028 && GET_CODE (op0) == ABS
2029 && GET_CODE (op1) == ABS
2030 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2031 && !side_effects_p (XEXP (op0, 0)))
2032 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 /* Reassociate multiplication, but for floating point MULTs
2035 only when the user specifies unsafe math optimizations. */
2036 if (! FLOAT_MODE_P (mode)
2037 || flag_unsafe_math_optimizations)
2039 tem = simplify_associative_operation (code, mode, op0, op1);
2040 if (tem)
2041 return tem;
2043 break;
2045 case IOR:
2046 if (trueop1 == const0_rtx)
2047 return op0;
2048 if (GET_CODE (trueop1) == CONST_INT
2049 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2050 == GET_MODE_MASK (mode)))
2051 return op1;
2052 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2053 return op0;
2054 /* A | (~A) -> -1 */
2055 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2056 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2057 && ! side_effects_p (op0)
2058 && SCALAR_INT_MODE_P (mode))
2059 return constm1_rtx;
2061 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2062 if (GET_CODE (op1) == CONST_INT
2063 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2064 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2065 return op1;
2067 /* Canonicalize (X & C1) | C2. */
2068 if (GET_CODE (op0) == AND
2069 && GET_CODE (trueop1) == CONST_INT
2070 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2072 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2073 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2074 HOST_WIDE_INT c2 = INTVAL (trueop1);
2076 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2077 if ((c1 & c2) == c1
2078 && !side_effects_p (XEXP (op0, 0)))
2079 return trueop1;
2081 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2082 if (((c1|c2) & mask) == mask)
2083 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2085 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2086 if (((c1 & ~c2) & mask) != (c1 & mask))
2088 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2089 gen_int_mode (c1 & ~c2, mode));
2090 return simplify_gen_binary (IOR, mode, tem, op1);
2094 /* Convert (A & B) | A to A. */
2095 if (GET_CODE (op0) == AND
2096 && (rtx_equal_p (XEXP (op0, 0), op1)
2097 || rtx_equal_p (XEXP (op0, 1), op1))
2098 && ! side_effects_p (XEXP (op0, 0))
2099 && ! side_effects_p (XEXP (op0, 1)))
2100 return op1;
2102 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2103 mode size to (rotate A CX). */
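/* For example, in SImode (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
becomes (rotate x (const_int 8)). */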
2105 if (GET_CODE (op1) == ASHIFT
2106 || GET_CODE (op1) == SUBREG)
2108 opleft = op1;
2109 opright = op0;
2111 else
2113 opright = op1;
2114 opleft = op0;
2117 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2118 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2119 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2120 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2121 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2122 == GET_MODE_BITSIZE (mode)))
2123 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2125 /* Same, but for ashift that has been "simplified" to a wider mode
2126 by simplify_shift_const. */
2128 if (GET_CODE (opleft) == SUBREG
2129 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2130 && GET_CODE (opright) == LSHIFTRT
2131 && GET_CODE (XEXP (opright, 0)) == SUBREG
2132 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2133 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2134 && (GET_MODE_SIZE (GET_MODE (opleft))
2135 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2136 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2137 SUBREG_REG (XEXP (opright, 0)))
2138 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2139 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2140 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2141 == GET_MODE_BITSIZE (mode)))
2142 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2143 XEXP (SUBREG_REG (opleft), 1));
2145 /* If we have (ior (and X C1) C2), simplify this by making
2146 C1 as small as possible if C1 actually changes. */
2147 if (GET_CODE (op1) == CONST_INT
2148 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2149 || INTVAL (op1) > 0)
2150 && GET_CODE (op0) == AND
2151 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2152 && GET_CODE (op1) == CONST_INT
2153 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2154 return simplify_gen_binary (IOR, mode,
2155 simplify_gen_binary
2156 (AND, mode, XEXP (op0, 0),
2157 GEN_INT (INTVAL (XEXP (op0, 1))
2158 & ~INTVAL (op1))),
2159 op1);
2161 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2162 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2163 the PLUS does not affect any of the bits in OP1: then we can do
2164 the IOR as a PLUS and we can associate. This is valid if OP1
2165 can be safely shifted left C bits. */
2166 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2167 && GET_CODE (XEXP (op0, 0)) == PLUS
2168 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2169 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2170 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2172 int count = INTVAL (XEXP (op0, 1));
2173 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2175 if (mask >> count == INTVAL (trueop1)
2176 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2177 return simplify_gen_binary (ASHIFTRT, mode,
2178 plus_constant (XEXP (op0, 0), mask),
2179 XEXP (op0, 1));
2182 tem = simplify_associative_operation (code, mode, op0, op1);
2183 if (tem)
2184 return tem;
2185 break;
2187 case XOR:
2188 if (trueop1 == const0_rtx)
2189 return op0;
2190 if (GET_CODE (trueop1) == CONST_INT
2191 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2192 == GET_MODE_MASK (mode)))
2193 return simplify_gen_unary (NOT, mode, op0, mode);
2194 if (rtx_equal_p (trueop0, trueop1)
2195 && ! side_effects_p (op0)
2196 && GET_MODE_CLASS (mode) != MODE_CC)
2197 return CONST0_RTX (mode);
2199 /* Canonicalize XOR of the most significant bit to PLUS. */
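/* Adding the sign bit and XORing it differ only in the carry out of the
most significant bit, which is discarded, so the two are equivalent. */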
2200 if ((GET_CODE (op1) == CONST_INT
2201 || GET_CODE (op1) == CONST_DOUBLE)
2202 && mode_signbit_p (mode, op1))
2203 return simplify_gen_binary (PLUS, mode, op0, op1);
2204 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2205 if ((GET_CODE (op1) == CONST_INT
2206 || GET_CODE (op1) == CONST_DOUBLE)
2207 && GET_CODE (op0) == PLUS
2208 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2209 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2210 && mode_signbit_p (mode, XEXP (op0, 1)))
2211 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2212 simplify_gen_binary (XOR, mode, op1,
2213 XEXP (op0, 1)));
2215 /* If we are XORing two things that have no bits in common,
2216 convert them into an IOR. This helps to detect rotation encoded
2217 using those methods and possibly other simplifications. */
2219 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2220 && (nonzero_bits (op0, mode)
2221 & nonzero_bits (op1, mode)) == 0)
2222 return (simplify_gen_binary (IOR, mode, op0, op1));
2224 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2225 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2226 (NOT y). */
2228 int num_negated = 0;
2230 if (GET_CODE (op0) == NOT)
2231 num_negated++, op0 = XEXP (op0, 0);
2232 if (GET_CODE (op1) == NOT)
2233 num_negated++, op1 = XEXP (op1, 0);
2235 if (num_negated == 2)
2236 return simplify_gen_binary (XOR, mode, op0, op1);
2237 else if (num_negated == 1)
2238 return simplify_gen_unary (NOT, mode,
2239 simplify_gen_binary (XOR, mode, op0, op1),
2240 mode);
2243 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2244 correspond to a machine insn or result in further simplifications
2245 if B is a constant. */
2247 if (GET_CODE (op0) == AND
2248 && rtx_equal_p (XEXP (op0, 1), op1)
2249 && ! side_effects_p (op1))
2250 return simplify_gen_binary (AND, mode,
2251 simplify_gen_unary (NOT, mode,
2252 XEXP (op0, 0), mode),
2253 op1);
2255 else if (GET_CODE (op0) == AND
2256 && rtx_equal_p (XEXP (op0, 0), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode,
2259 simplify_gen_unary (NOT, mode,
2260 XEXP (op0, 1), mode),
2261 op1);
2263 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2264 comparison if STORE_FLAG_VALUE is 1. */
2265 if (STORE_FLAG_VALUE == 1
2266 && trueop1 == const1_rtx
2267 && COMPARISON_P (op0)
2268 && (reversed = reversed_comparison (op0, mode)))
2269 return reversed;
2271 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2272 is (lt foo (const_int 0)), so we can perform the above
2273 simplification if STORE_FLAG_VALUE is 1. */
2275 if (STORE_FLAG_VALUE == 1
2276 && trueop1 == const1_rtx
2277 && GET_CODE (op0) == LSHIFTRT
2278 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2279 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2280 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2282 /* (xor (comparison foo bar) (const_int sign-bit))
2283 when STORE_FLAG_VALUE is the sign bit. */
2284 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2285 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2286 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2287 && trueop1 == const_true_rtx
2288 && COMPARISON_P (op0)
2289 && (reversed = reversed_comparison (op0, mode)))
2290 return reversed;
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (tem)
2294 return tem;
2295 break;
2297 case AND:
2298 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2299 return trueop1;
2300 /* If we are turning off bits already known off in OP0, we need
2301 not do an AND. */
2302 if (GET_CODE (trueop1) == CONST_INT
2303 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2304 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2305 return op0;
2306 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2307 && GET_MODE_CLASS (mode) != MODE_CC)
2308 return op0;
2309 /* A & (~A) -> 0 */
2310 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2311 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2312 && ! side_effects_p (op0)
2313 && GET_MODE_CLASS (mode) != MODE_CC)
2314 return CONST0_RTX (mode);
2316 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2317 there are no nonzero bits of C outside of X's mode. */
2318 if ((GET_CODE (op0) == SIGN_EXTEND
2319 || GET_CODE (op0) == ZERO_EXTEND)
2320 && GET_CODE (trueop1) == CONST_INT
2321 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2322 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2323 & INTVAL (trueop1)) == 0)
2325 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2326 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2327 gen_int_mode (INTVAL (trueop1),
2328 imode));
2329 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2332 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2333 if (GET_CODE (op0) == IOR
2334 && GET_CODE (trueop1) == CONST_INT
2335 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2337 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2338 return simplify_gen_binary (IOR, mode,
2339 simplify_gen_binary (AND, mode,
2340 XEXP (op0, 0), op1),
2341 gen_int_mode (tmp, mode));
2344 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2345 insn (and may simplify more). */
2346 if (GET_CODE (op0) == XOR
2347 && rtx_equal_p (XEXP (op0, 0), op1)
2348 && ! side_effects_p (op1))
2349 return simplify_gen_binary (AND, mode,
2350 simplify_gen_unary (NOT, mode,
2351 XEXP (op0, 1), mode),
2352 op1);
2354 if (GET_CODE (op0) == XOR
2355 && rtx_equal_p (XEXP (op0, 1), op1)
2356 && ! side_effects_p (op1))
2357 return simplify_gen_binary (AND, mode,
2358 simplify_gen_unary (NOT, mode,
2359 XEXP (op0, 0), mode),
2360 op1);
2362 /* Similarly for (~(A ^ B)) & A. */
2363 if (GET_CODE (op0) == NOT
2364 && GET_CODE (XEXP (op0, 0)) == XOR
2365 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2366 && ! side_effects_p (op1))
2367 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2369 if (GET_CODE (op0) == NOT
2370 && GET_CODE (XEXP (op0, 0)) == XOR
2371 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2372 && ! side_effects_p (op1))
2373 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2375 /* Convert (A | B) & A to A. */
2376 if (GET_CODE (op0) == IOR
2377 && (rtx_equal_p (XEXP (op0, 0), op1)
2378 || rtx_equal_p (XEXP (op0, 1), op1))
2379 && ! side_effects_p (XEXP (op0, 0))
2380 && ! side_effects_p (XEXP (op0, 1)))
2381 return op1;
2383 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2384 ((A & N) + B) & M -> (A + B) & M
2385 Similarly if (N & M) == 0,
2386 ((A | N) + B) & M -> (A + B) & M
2387 and for - instead of + and/or ^ instead of |. */
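/* The bits selected by M depend only on the corresponding low bits of the
PLUS or MINUS operands, and those bits of A are unchanged by the inner
AND, IOR or XOR under the conditions above. */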
2388 if (GET_CODE (trueop1) == CONST_INT
2389 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2390 && ~INTVAL (trueop1)
2391 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2392 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2394 rtx pmop[2];
2395 int which;
2397 pmop[0] = XEXP (op0, 0);
2398 pmop[1] = XEXP (op0, 1);
2400 for (which = 0; which < 2; which++)
2402 tem = pmop[which];
2403 switch (GET_CODE (tem))
2405 case AND:
2406 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2407 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2408 == INTVAL (trueop1))
2409 pmop[which] = XEXP (tem, 0);
2410 break;
2411 case IOR:
2412 case XOR:
2413 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2414 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2415 pmop[which] = XEXP (tem, 0);
2416 break;
2417 default:
2418 break;
2422 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2424 tem = simplify_gen_binary (GET_CODE (op0), mode,
2425 pmop[0], pmop[1]);
2426 return simplify_gen_binary (code, mode, tem, op1);
2429 tem = simplify_associative_operation (code, mode, op0, op1);
2430 if (tem)
2431 return tem;
2432 break;
2434 case UDIV:
2435 /* 0/x is 0 (or x&0 if x has side-effects). */
2436 if (trueop0 == CONST0_RTX (mode))
2438 if (side_effects_p (op1))
2439 return simplify_gen_binary (AND, mode, op1, trueop0);
2440 return trueop0;
2442 /* x/1 is x. */
2443 if (trueop1 == CONST1_RTX (mode))
2444 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2445 /* Convert divide by power of two into shift. */
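/* For example, the unsigned division x / 8 becomes the logical shift x >> 3. */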
2446 if (GET_CODE (trueop1) == CONST_INT
2447 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2448 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2449 break;
2451 case DIV:
2452 /* Handle floating point and integers separately. */
2453 if (SCALAR_FLOAT_MODE_P (mode))
2455 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2456 safe for modes with NaNs, since 0.0 / 0.0 will then be
2457 NaN rather than 0.0. Nor is it safe for modes with signed
2458 zeros, since dividing 0 by a negative number gives -0.0 */
2459 if (trueop0 == CONST0_RTX (mode)
2460 && !HONOR_NANS (mode)
2461 && !HONOR_SIGNED_ZEROS (mode)
2462 && ! side_effects_p (op1))
2463 return op0;
2464 /* x/1.0 is x. */
2465 if (trueop1 == CONST1_RTX (mode)
2466 && !HONOR_SNANS (mode))
2467 return op0;
2469 if (GET_CODE (trueop1) == CONST_DOUBLE
2470 && trueop1 != CONST0_RTX (mode))
2472 REAL_VALUE_TYPE d;
2473 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2475 /* x/-1.0 is -x. */
2476 if (REAL_VALUES_EQUAL (d, dconstm1)
2477 && !HONOR_SNANS (mode))
2478 return simplify_gen_unary (NEG, mode, op0, mode);
2480 /* Change FP division by a constant into multiplication.
2481 Only do this with -funsafe-math-optimizations. */
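/* For example, x / 3.0 becomes x * (1.0 / 3.0); the reciprocal is rounded,
so this is only done under -funsafe-math-optimizations. */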
2482 if (flag_unsafe_math_optimizations
2483 && !REAL_VALUES_EQUAL (d, dconst0))
2485 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2486 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2487 return simplify_gen_binary (MULT, mode, op0, tem);
2491 else
2493 /* 0/x is 0 (or x&0 if x has side-effects). */
2494 if (trueop0 == CONST0_RTX (mode))
2496 if (side_effects_p (op1))
2497 return simplify_gen_binary (AND, mode, op1, trueop0);
2498 return trueop0;
2500 /* x/1 is x. */
2501 if (trueop1 == CONST1_RTX (mode))
2502 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2503 /* x/-1 is -x. */
2504 if (trueop1 == constm1_rtx)
2506 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2507 return simplify_gen_unary (NEG, mode, x, mode);
2510 break;
2512 case UMOD:
2513 /* 0%x is 0 (or x&0 if x has side-effects). */
2514 if (trueop0 == CONST0_RTX (mode))
2516 if (side_effects_p (op1))
2517 return simplify_gen_binary (AND, mode, op1, trueop0);
2518 return trueop0;
2520 /* x%1 is 0 (or x&0 if x has side-effects). */
2521 if (trueop1 == CONST1_RTX (mode))
2523 if (side_effects_p (op0))
2524 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2525 return CONST0_RTX (mode);
2527 /* Implement modulus by power of two as AND. */
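/* For example, the unsigned modulus x % 8 becomes x & 7. */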
2528 if (GET_CODE (trueop1) == CONST_INT
2529 && exact_log2 (INTVAL (trueop1)) > 0)
2530 return simplify_gen_binary (AND, mode, op0,
2531 GEN_INT (INTVAL (op1) - 1));
2532 break;
2534 case MOD:
2535 /* 0%x is 0 (or x&0 if x has side-effects). */
2536 if (trueop0 == CONST0_RTX (mode))
2538 if (side_effects_p (op1))
2539 return simplify_gen_binary (AND, mode, op1, trueop0);
2540 return trueop0;
2542 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2543 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2545 if (side_effects_p (op0))
2546 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2547 return CONST0_RTX (mode);
2549 break;
2551 case ROTATERT:
2552 case ROTATE:
2553 case ASHIFTRT:
2554 if (trueop1 == CONST0_RTX (mode))
2555 return op0;
2556 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2557 return op0;
2558 /* Rotating ~0 always results in ~0. */
2559 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2560 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2561 && ! side_effects_p (op1))
2562 return op0;
2563 break;
2565 case ASHIFT:
2566 case SS_ASHIFT:
2567 if (trueop1 == CONST0_RTX (mode))
2568 return op0;
2569 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2570 return op0;
2571 break;
2573 case LSHIFTRT:
2574 if (trueop1 == CONST0_RTX (mode))
2575 return op0;
2576 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2577 return op0;
2578 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
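/* CLZ of a nonzero value is at most GET_MODE_BITSIZE (imode) - 1, and the
test below requires CLZ of zero to be the bitsize itself, so shifting
right by log2 of the bitsize yields 1 exactly when X is zero. */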
2579 if (GET_CODE (op0) == CLZ
2580 && GET_CODE (trueop1) == CONST_INT
2581 && STORE_FLAG_VALUE == 1
2582 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2584 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2585 unsigned HOST_WIDE_INT zero_val = 0;
2587 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2588 && zero_val == GET_MODE_BITSIZE (imode)
2589 && INTVAL (trueop1) == exact_log2 (zero_val))
2590 return simplify_gen_relational (EQ, mode, imode,
2591 XEXP (op0, 0), const0_rtx);
2593 break;
2595 case SMIN:
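/* smin of X and the most negative value of the mode is that value. */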
2596 if (width <= HOST_BITS_PER_WIDE_INT
2597 && GET_CODE (trueop1) == CONST_INT
2598 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2599 && ! side_effects_p (op0))
2600 return op1;
2601 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2602 return op0;
2603 tem = simplify_associative_operation (code, mode, op0, op1);
2604 if (tem)
2605 return tem;
2606 break;
2608 case SMAX:
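/* smax of X and the largest signed value of the mode is that value. */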
2609 if (width <= HOST_BITS_PER_WIDE_INT
2610 && GET_CODE (trueop1) == CONST_INT
2611 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2612 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2613 && ! side_effects_p (op0))
2614 return op1;
2615 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2616 return op0;
2617 tem = simplify_associative_operation (code, mode, op0, op1);
2618 if (tem)
2619 return tem;
2620 break;
2622 case UMIN:
2623 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2624 return op1;
2625 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2626 return op0;
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 break;
2632 case UMAX:
2633 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2634 return op1;
2635 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2636 return op0;
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2638 if (tem)
2639 return tem;
2640 break;
2642 case SS_PLUS:
2643 case US_PLUS:
2644 case SS_MINUS:
2645 case US_MINUS:
2646 /* ??? There are simplifications that can be done. */
2647 return 0;
2649 case VEC_SELECT:
2650 if (!VECTOR_MODE_P (mode))
2652 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2653 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2654 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2655 gcc_assert (XVECLEN (trueop1, 0) == 1);
2656 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2658 if (GET_CODE (trueop0) == CONST_VECTOR)
2659 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2660 (trueop1, 0, 0)));
2662 else
2664 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2665 gcc_assert (GET_MODE_INNER (mode)
2666 == GET_MODE_INNER (GET_MODE (trueop0)));
2667 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2669 if (GET_CODE (trueop0) == CONST_VECTOR)
2671 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2672 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2673 rtvec v = rtvec_alloc (n_elts);
2674 unsigned int i;
2676 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2677 for (i = 0; i < n_elts; i++)
2679 rtx x = XVECEXP (trueop1, 0, i);
2681 gcc_assert (GET_CODE (x) == CONST_INT);
2682 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2683 INTVAL (x));
2686 return gen_rtx_CONST_VECTOR (mode, v);
2690 if (XVECLEN (trueop1, 0) == 1
2691 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2692 && GET_CODE (trueop0) == VEC_CONCAT)
2694 rtx vec = trueop0;
2695 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2697 /* Try to find the element in the VEC_CONCAT. */
2698 while (GET_MODE (vec) != mode
2699 && GET_CODE (vec) == VEC_CONCAT)
2701 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2702 if (offset < vec_size)
2703 vec = XEXP (vec, 0);
2704 else
2706 offset -= vec_size;
2707 vec = XEXP (vec, 1);
2709 vec = avoid_constant_pool_reference (vec);
2712 if (GET_MODE (vec) == mode)
2713 return vec;
2716 return 0;
2717 case VEC_CONCAT:
2719 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2720 ? GET_MODE (trueop0)
2721 : GET_MODE_INNER (mode));
2722 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2723 ? GET_MODE (trueop1)
2724 : GET_MODE_INNER (mode));
2726 gcc_assert (VECTOR_MODE_P (mode));
2727 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2728 == GET_MODE_SIZE (mode));
2730 if (VECTOR_MODE_P (op0_mode))
2731 gcc_assert (GET_MODE_INNER (mode)
2732 == GET_MODE_INNER (op0_mode));
2733 else
2734 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2736 if (VECTOR_MODE_P (op1_mode))
2737 gcc_assert (GET_MODE_INNER (mode)
2738 == GET_MODE_INNER (op1_mode));
2739 else
2740 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2742 if ((GET_CODE (trueop0) == CONST_VECTOR
2743 || GET_CODE (trueop0) == CONST_INT
2744 || GET_CODE (trueop0) == CONST_DOUBLE)
2745 && (GET_CODE (trueop1) == CONST_VECTOR
2746 || GET_CODE (trueop1) == CONST_INT
2747 || GET_CODE (trueop1) == CONST_DOUBLE))
2749 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2750 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2751 rtvec v = rtvec_alloc (n_elts);
2752 unsigned int i;
2753 unsigned in_n_elts = 1;
2755 if (VECTOR_MODE_P (op0_mode))
2756 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2757 for (i = 0; i < n_elts; i++)
2759 if (i < in_n_elts)
2761 if (!VECTOR_MODE_P (op0_mode))
2762 RTVEC_ELT (v, i) = trueop0;
2763 else
2764 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2766 else
2768 if (!VECTOR_MODE_P (op1_mode))
2769 RTVEC_ELT (v, i) = trueop1;
2770 else
2771 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2772 i - in_n_elts);
2776 return gen_rtx_CONST_VECTOR (mode, v);
2779 return 0;
2781 default:
2782 gcc_unreachable ();
2785 return 0;
2789 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2790 rtx op0, rtx op1)
2792 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2793 HOST_WIDE_INT val;
2794 unsigned int width = GET_MODE_BITSIZE (mode);
2796 if (VECTOR_MODE_P (mode)
2797 && code != VEC_CONCAT
2798 && GET_CODE (op0) == CONST_VECTOR
2799 && GET_CODE (op1) == CONST_VECTOR)
2801 unsigned n_elts = GET_MODE_NUNITS (mode);
2802 enum machine_mode op0mode = GET_MODE (op0);
2803 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2804 enum machine_mode op1mode = GET_MODE (op1);
2805 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2806 rtvec v = rtvec_alloc (n_elts);
2807 unsigned int i;
2809 gcc_assert (op0_n_elts == n_elts);
2810 gcc_assert (op1_n_elts == n_elts);
2811 for (i = 0; i < n_elts; i++)
2813 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2814 CONST_VECTOR_ELT (op0, i),
2815 CONST_VECTOR_ELT (op1, i));
2816 if (!x)
2817 return 0;
2818 RTVEC_ELT (v, i) = x;
2821 return gen_rtx_CONST_VECTOR (mode, v);
2824 if (VECTOR_MODE_P (mode)
2825 && code == VEC_CONCAT
2826 && CONSTANT_P (op0) && CONSTANT_P (op1))
2828 unsigned n_elts = GET_MODE_NUNITS (mode);
2829 rtvec v = rtvec_alloc (n_elts);
2831 gcc_assert (n_elts >= 2);
2832 if (n_elts == 2)
2834 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2835 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2837 RTVEC_ELT (v, 0) = op0;
2838 RTVEC_ELT (v, 1) = op1;
2840 else
2842 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2843 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2844 unsigned i;
2846 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2847 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2848 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2850 for (i = 0; i < op0_n_elts; ++i)
2851 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2852 for (i = 0; i < op1_n_elts; ++i)
2853 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2856 return gen_rtx_CONST_VECTOR (mode, v);
2859 if (SCALAR_FLOAT_MODE_P (mode)
2860 && GET_CODE (op0) == CONST_DOUBLE
2861 && GET_CODE (op1) == CONST_DOUBLE
2862 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2864 if (code == AND
2865 || code == IOR
2866 || code == XOR)
2868 long tmp0[4];
2869 long tmp1[4];
2870 REAL_VALUE_TYPE r;
2871 int i;
2873 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2874 GET_MODE (op0));
2875 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2876 GET_MODE (op1));
2877 for (i = 0; i < 4; i++)
2879 switch (code)
2881 case AND:
2882 tmp0[i] &= tmp1[i];
2883 break;
2884 case IOR:
2885 tmp0[i] |= tmp1[i];
2886 break;
2887 case XOR:
2888 tmp0[i] ^= tmp1[i];
2889 break;
2890 default:
2891 gcc_unreachable ();
2894 real_from_target (&r, tmp0, mode);
2895 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2897 else
2899 REAL_VALUE_TYPE f0, f1, value, result;
2900 bool inexact;
2902 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2903 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2904 real_convert (&f0, mode, &f0);
2905 real_convert (&f1, mode, &f1);
2907 if (HONOR_SNANS (mode)
2908 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2909 return 0;
2911 if (code == DIV
2912 && REAL_VALUES_EQUAL (f1, dconst0)
2913 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2914 return 0;
2916 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2917 && flag_trapping_math
2918 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2920 int s0 = REAL_VALUE_NEGATIVE (f0);
2921 int s1 = REAL_VALUE_NEGATIVE (f1);
2923 switch (code)
2925 case PLUS:
2926 /* Inf + -Inf = NaN plus exception. */
2927 if (s0 != s1)
2928 return 0;
2929 break;
2930 case MINUS:
2931 /* Inf - Inf = NaN plus exception. */
2932 if (s0 == s1)
2933 return 0;
2934 break;
2935 case DIV:
2936 /* Inf / Inf = NaN plus exception. */
2937 return 0;
2938 default:
2939 break;
2943 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2944 && flag_trapping_math
2945 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2946 || (REAL_VALUE_ISINF (f1)
2947 && REAL_VALUES_EQUAL (f0, dconst0))))
2948 /* Inf * 0 = NaN plus exception. */
2949 return 0;
2951 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2952 &f0, &f1);
2953 real_convert (&result, mode, &value);
2955 /* Don't constant fold this floating point operation if
2956 the result has overflowed and flag_trapping_math is set. */
2958 if (flag_trapping_math
2959 && MODE_HAS_INFINITIES (mode)
2960 && REAL_VALUE_ISINF (result)
2961 && !REAL_VALUE_ISINF (f0)
2962 && !REAL_VALUE_ISINF (f1))
2963 /* Overflow plus exception. */
2964 return 0;
2966 /* Don't constant fold this floating point operation if the
2967 result may depend upon the run-time rounding mode and
2968 flag_rounding_math is set, or if GCC's software emulation
2969 is unable to accurately represent the result. */
2971 if ((flag_rounding_math
2972 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2973 && !flag_unsafe_math_optimizations))
2974 && (inexact || !real_identical (&result, &value)))
2975 return NULL_RTX;
2977 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2981 /* We can fold some multi-word operations. */
2982 if (GET_MODE_CLASS (mode) == MODE_INT
2983 && width == HOST_BITS_PER_WIDE_INT * 2
2984 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2985 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2987 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2988 HOST_WIDE_INT h1, h2, hv, ht;
2990 if (GET_CODE (op0) == CONST_DOUBLE)
2991 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2992 else
2993 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2995 if (GET_CODE (op1) == CONST_DOUBLE)
2996 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2997 else
2998 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3000 switch (code)
3002 case MINUS:
3003 /* A - B == A + (-B). */
3004 neg_double (l2, h2, &lv, &hv);
3005 l2 = lv, h2 = hv;
3007 /* Fall through.... */
3009 case PLUS:
3010 add_double (l1, h1, l2, h2, &lv, &hv);
3011 break;
3013 case MULT:
3014 mul_double (l1, h1, l2, h2, &lv, &hv);
3015 break;
3017 case DIV:
3018 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3019 &lv, &hv, &lt, &ht))
3020 return 0;
3021 break;
3023 case MOD:
3024 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3025 &lt, &ht, &lv, &hv))
3026 return 0;
3027 break;
3029 case UDIV:
3030 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3031 &lv, &hv, &lt, &ht))
3032 return 0;
3033 break;
3035 case UMOD:
3036 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3037 &lt, &ht, &lv, &hv))
3038 return 0;
3039 break;
3041 case AND:
3042 lv = l1 & l2, hv = h1 & h2;
3043 break;
3045 case IOR:
3046 lv = l1 | l2, hv = h1 | h2;
3047 break;
3049 case XOR:
3050 lv = l1 ^ l2, hv = h1 ^ h2;
3051 break;
3053 case SMIN:
3054 if (h1 < h2
3055 || (h1 == h2
3056 && ((unsigned HOST_WIDE_INT) l1
3057 < (unsigned HOST_WIDE_INT) l2)))
3058 lv = l1, hv = h1;
3059 else
3060 lv = l2, hv = h2;
3061 break;
3063 case SMAX:
3064 if (h1 > h2
3065 || (h1 == h2
3066 && ((unsigned HOST_WIDE_INT) l1
3067 > (unsigned HOST_WIDE_INT) l2)))
3068 lv = l1, hv = h1;
3069 else
3070 lv = l2, hv = h2;
3071 break;
3073 case UMIN:
3074 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3075 || (h1 == h2
3076 && ((unsigned HOST_WIDE_INT) l1
3077 < (unsigned HOST_WIDE_INT) l2)))
3078 lv = l1, hv = h1;
3079 else
3080 lv = l2, hv = h2;
3081 break;
3083 case UMAX:
3084 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3085 || (h1 == h2
3086 && ((unsigned HOST_WIDE_INT) l1
3087 > (unsigned HOST_WIDE_INT) l2)))
3088 lv = l1, hv = h1;
3089 else
3090 lv = l2, hv = h2;
3091 break;
3093 case LSHIFTRT: case ASHIFTRT:
3094 case ASHIFT:
3095 case ROTATE: case ROTATERT:
3096 if (SHIFT_COUNT_TRUNCATED)
3097 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3099 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3100 return 0;
3102 if (code == LSHIFTRT || code == ASHIFTRT)
3103 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3104 code == ASHIFTRT);
3105 else if (code == ASHIFT)
3106 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3107 else if (code == ROTATE)
3108 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3109 else /* code == ROTATERT */
3110 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3111 break;
3113 default:
3114 return 0;
3117 return immed_double_const (lv, hv, mode);
3120 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3121 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3123 /* Get the integer argument values in two forms:
3124 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3126 arg0 = INTVAL (op0);
3127 arg1 = INTVAL (op1);
3129 if (width < HOST_BITS_PER_WIDE_INT)
3131 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3132 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3134 arg0s = arg0;
3135 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3136 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3138 arg1s = arg1;
3139 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3140 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3142 else
3144 arg0s = arg0;
3145 arg1s = arg1;
3148 /* Compute the value of the arithmetic. */
3150 switch (code)
3152 case PLUS:
3153 val = arg0s + arg1s;
3154 break;
3156 case MINUS:
3157 val = arg0s - arg1s;
3158 break;
3160 case MULT:
3161 val = arg0s * arg1s;
3162 break;
3164 case DIV:
3165 if (arg1s == 0
3166 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3167 && arg1s == -1))
3168 return 0;
3169 val = arg0s / arg1s;
3170 break;
3172 case MOD:
3173 if (arg1s == 0
3174 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3175 && arg1s == -1))
3176 return 0;
3177 val = arg0s % arg1s;
3178 break;
3180 case UDIV:
3181 if (arg1 == 0
3182 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3183 && arg1s == -1))
3184 return 0;
3185 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3186 break;
3188 case UMOD:
3189 if (arg1 == 0
3190 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3191 && arg1s == -1))
3192 return 0;
3193 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3194 break;
3196 case AND:
3197 val = arg0 & arg1;
3198 break;
3200 case IOR:
3201 val = arg0 | arg1;
3202 break;
3204 case XOR:
3205 val = arg0 ^ arg1;
3206 break;
3208 case LSHIFTRT:
3209 case ASHIFT:
3210 case ASHIFTRT:
3211 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3212 the value is in range. We can't return any old value for
3213 out-of-range arguments because either the middle-end (via
3214 shift_truncation_mask) or the back-end might be relying on
3215 target-specific knowledge. Nor can we rely on
3216 shift_truncation_mask, since the shift might not be part of an
3217 ashlM3, lshrM3 or ashrM3 instruction. */
3218 if (SHIFT_COUNT_TRUNCATED)
3219 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3220 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3221 return 0;
3223 val = (code == ASHIFT
3224 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3225 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3227 /* Sign-extend the result for arithmetic right shifts. */
3228 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3229 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3230 break;
3232 case ROTATERT:
3233 if (arg1 < 0)
3234 return 0;
3236 arg1 %= width;
3237 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3238 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3239 break;
3241 case ROTATE:
3242 if (arg1 < 0)
3243 return 0;
3245 arg1 %= width;
3246 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3247 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3248 break;
3250 case COMPARE:
3251 /* Do nothing here. */
3252 return 0;
3254 case SMIN:
3255 val = arg0s <= arg1s ? arg0s : arg1s;
3256 break;
3258 case UMIN:
3259 val = ((unsigned HOST_WIDE_INT) arg0
3260 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3261 break;
3263 case SMAX:
3264 val = arg0s > arg1s ? arg0s : arg1s;
3265 break;
3267 case UMAX:
3268 val = ((unsigned HOST_WIDE_INT) arg0
3269 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3270 break;
3272 case SS_PLUS:
3273 case US_PLUS:
3274 case SS_MINUS:
3275 case US_MINUS:
3276 case SS_ASHIFT:
3277 /* ??? There are simplifications that can be done. */
3278 return 0;
3280 default:
3281 gcc_unreachable ();
3284 return gen_int_mode (val, mode);
3287 return NULL_RTX;
3292 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3293 PLUS or MINUS.
3295 Rather than test for specific cases, we do this by a brute-force method
3296 and do all possible simplifications until no more changes occur. Then
3297 we rebuild the operation. */
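/* For example, (minus a (plus b c)) is decomposed into the operands a, -b
and -c, pairs are simplified where possible, and the result is rebuilt
from whatever remains. */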
3299 struct simplify_plus_minus_op_data
3301 rtx op;
3302 short neg;
3305 static bool
3306 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3308 int result;
3310 result = (commutative_operand_precedence (y)
3311 - commutative_operand_precedence (x));
3312 if (result)
3313 return result > 0;
3315 /* Group together equal REGs to do more simplification. */
3316 if (REG_P (x) && REG_P (y))
3317 return REGNO (x) > REGNO (y);
3318 else
3319 return false;
3322 static rtx
3323 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3324 rtx op1)
3326 struct simplify_plus_minus_op_data ops[8];
3327 rtx result, tem;
3328 int n_ops = 2, input_ops = 2;
3329 int changed, n_constants = 0, canonicalized = 0;
3330 int i, j;
3332 memset (ops, 0, sizeof ops);
3334 /* Set up the two operands and then expand them until nothing has been
3335 changed. If we run out of room in our array, give up; this should
3336 almost never happen. */
3338 ops[0].op = op0;
3339 ops[0].neg = 0;
3340 ops[1].op = op1;
3341 ops[1].neg = (code == MINUS);
3345 changed = 0;
3347 for (i = 0; i < n_ops; i++)
3349 rtx this_op = ops[i].op;
3350 int this_neg = ops[i].neg;
3351 enum rtx_code this_code = GET_CODE (this_op);
3353 switch (this_code)
3355 case PLUS:
3356 case MINUS:
3357 if (n_ops == 7)
3358 return NULL_RTX;
3360 ops[n_ops].op = XEXP (this_op, 1);
3361 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3362 n_ops++;
3364 ops[i].op = XEXP (this_op, 0);
3365 input_ops++;
3366 changed = 1;
3367 canonicalized |= this_neg;
3368 break;
3370 case NEG:
3371 ops[i].op = XEXP (this_op, 0);
3372 ops[i].neg = ! this_neg;
3373 changed = 1;
3374 canonicalized = 1;
3375 break;
3377 case CONST:
3378 if (n_ops < 7
3379 && GET_CODE (XEXP (this_op, 0)) == PLUS
3380 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3381 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3383 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3384 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3385 ops[n_ops].neg = this_neg;
3386 n_ops++;
3387 changed = 1;
3388 canonicalized = 1;
3390 break;
3392 case NOT:
3393 /* ~a -> (-a - 1) */
3394 if (n_ops != 7)
3396 ops[n_ops].op = constm1_rtx;
3397 ops[n_ops++].neg = this_neg;
3398 ops[i].op = XEXP (this_op, 0);
3399 ops[i].neg = !this_neg;
3400 changed = 1;
3401 canonicalized = 1;
3403 break;
3405 case CONST_INT:
3406 n_constants++;
3407 if (this_neg)
3409 ops[i].op = neg_const_int (mode, this_op);
3410 ops[i].neg = 0;
3411 changed = 1;
3412 canonicalized = 1;
3414 break;
3416 default:
3417 break;
3421 while (changed);
3423 if (n_constants > 1)
3424 canonicalized = 1;
3426 gcc_assert (n_ops >= 2);
3428 /* If we only have two operands, we can avoid the loops. */
3429 if (n_ops == 2)
3431 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3432 rtx lhs, rhs;
3434 /* Get the two operands. Be careful with the order, especially for
3435 the cases where code == MINUS. */
3436 if (ops[0].neg && ops[1].neg)
3438 lhs = gen_rtx_NEG (mode, ops[0].op);
3439 rhs = ops[1].op;
3441 else if (ops[0].neg)
3443 lhs = ops[1].op;
3444 rhs = ops[0].op;
3446 else
3448 lhs = ops[0].op;
3449 rhs = ops[1].op;
3452 return simplify_const_binary_operation (code, mode, lhs, rhs);
3455 /* Now simplify each pair of operands until nothing changes. */
3458 /* Insertion sort is good enough for an eight-element array. */
3459 for (i = 1; i < n_ops; i++)
3461 struct simplify_plus_minus_op_data save;
3462 j = i - 1;
3463 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3464 continue;
3466 canonicalized = 1;
3467 save = ops[i];
3469 ops[j + 1] = ops[j];
3470 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3471 ops[j + 1] = save;
3474 /* This is only useful the first time through. */
3475 if (!canonicalized)
3476 return NULL_RTX;
3478 changed = 0;
3479 for (i = n_ops - 1; i > 0; i--)
3480 for (j = i - 1; j >= 0; j--)
3482 rtx lhs = ops[j].op, rhs = ops[i].op;
3483 int lneg = ops[j].neg, rneg = ops[i].neg;
3485 if (lhs != 0 && rhs != 0)
3487 enum rtx_code ncode = PLUS;
3489 if (lneg != rneg)
3491 ncode = MINUS;
3492 if (lneg)
3493 tem = lhs, lhs = rhs, rhs = tem;
3495 else if (swap_commutative_operands_p (lhs, rhs))
3496 tem = lhs, lhs = rhs, rhs = tem;
3498 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3499 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3501 rtx tem_lhs, tem_rhs;
3503 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3504 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3505 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3507 if (tem && !CONSTANT_P (tem))
3508 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3510 else
3511 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3513 /* Reject "simplifications" that just wrap the two
3514 arguments in a CONST. Failure to do so can result
3515 in infinite recursion with simplify_binary_operation
3516 when it calls us to simplify CONST operations. */
3517 if (tem
3518 && ! (GET_CODE (tem) == CONST
3519 && GET_CODE (XEXP (tem, 0)) == ncode
3520 && XEXP (XEXP (tem, 0), 0) == lhs
3521 && XEXP (XEXP (tem, 0), 1) == rhs))
3523 lneg &= rneg;
3524 if (GET_CODE (tem) == NEG)
3525 tem = XEXP (tem, 0), lneg = !lneg;
3526 if (GET_CODE (tem) == CONST_INT && lneg)
3527 tem = neg_const_int (mode, tem), lneg = 0;
3529 ops[i].op = tem;
3530 ops[i].neg = lneg;
3531 ops[j].op = NULL_RTX;
3532 changed = 1;
3537 /* Pack all the operands to the lower-numbered entries. */
3538 for (i = 0, j = 0; j < n_ops; j++)
3539 if (ops[j].op)
3541 ops[i] = ops[j];
3542 i++;
3544 n_ops = i;
3546 while (changed);
3548 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3549 if (n_ops == 2
3550 && GET_CODE (ops[1].op) == CONST_INT
3551 && CONSTANT_P (ops[0].op)
3552 && ops[0].neg)
3553 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3555 /* We suppressed creation of trivial CONST expressions in the
3556 combination loop to avoid recursion. Create one manually now.
3557 The combination loop should have ensured that there is exactly
3558 one CONST_INT, and the sort will have ensured that it is last
3559 in the array and that any other constant will be next-to-last. */
3561 if (n_ops > 1
3562 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3563 && CONSTANT_P (ops[n_ops - 2].op))
3565 rtx value = ops[n_ops - 1].op;
3566 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3567 value = neg_const_int (mode, value);
3568 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3569 n_ops--;
3572 /* Put a non-negated operand first, if possible. */
3574 for (i = 0; i < n_ops && ops[i].neg; i++)
3575 continue;
3576 if (i == n_ops)
3577 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3578 else if (i != 0)
3580 tem = ops[0].op;
3581 ops[0] = ops[i];
3582 ops[i].op = tem;
3583 ops[i].neg = 1;
3586 /* Now make the result by performing the requested operations. */
3587 result = ops[0].op;
3588 for (i = 1; i < n_ops; i++)
3589 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3590 mode, result, ops[i].op);
3592 return result;
3595 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3596 static bool
3597 plus_minus_operand_p (const_rtx x)
3599 return GET_CODE (x) == PLUS
3600 || GET_CODE (x) == MINUS
3601 || (GET_CODE (x) == CONST
3602 && GET_CODE (XEXP (x, 0)) == PLUS
3603 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3604 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3607 /* Like simplify_binary_operation except used for relational operators.
3608 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3609 not both be VOIDmode.
3611 CMP_MODE specifies the mode in which the comparison is done, so it is
3612 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3613 the operands or, if both are VOIDmode, the operands are compared in
3614 "infinite precision". */
3616 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3617 enum machine_mode cmp_mode, rtx op0, rtx op1)
3619 rtx tem, trueop0, trueop1;
3621 if (cmp_mode == VOIDmode)
3622 cmp_mode = GET_MODE (op0);
3623 if (cmp_mode == VOIDmode)
3624 cmp_mode = GET_MODE (op1);
3626 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3627 if (tem)
3629 if (SCALAR_FLOAT_MODE_P (mode))
3631 if (tem == const0_rtx)
3632 return CONST0_RTX (mode);
3633 #ifdef FLOAT_STORE_FLAG_VALUE
3635 REAL_VALUE_TYPE val;
3636 val = FLOAT_STORE_FLAG_VALUE (mode);
3637 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3639 #else
3640 return NULL_RTX;
3641 #endif
3643 if (VECTOR_MODE_P (mode))
3645 if (tem == const0_rtx)
3646 return CONST0_RTX (mode);
3647 #ifdef VECTOR_STORE_FLAG_VALUE
3649 int i, units;
3650 rtvec v;
3652 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3653 if (val == NULL_RTX)
3654 return NULL_RTX;
3655 if (val == const1_rtx)
3656 return CONST1_RTX (mode);
3658 units = GET_MODE_NUNITS (mode);
3659 v = rtvec_alloc (units);
3660 for (i = 0; i < units; i++)
3661 RTVEC_ELT (v, i) = val;
3662 return gen_rtx_raw_CONST_VECTOR (mode, v);
3664 #else
3665 return NULL_RTX;
3666 #endif
3669 return tem;
3672 /* For the following tests, ensure const0_rtx is op1. */
3673 if (swap_commutative_operands_p (op0, op1)
3674 || (op0 == const0_rtx && op1 != const0_rtx))
3675 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3677 /* If op0 is a compare, extract the comparison arguments from it. */
3678 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3679 return simplify_relational_operation (code, mode, VOIDmode,
3680 XEXP (op0, 0), XEXP (op0, 1));
3682 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3683 || CC0_P (op0))
3684 return NULL_RTX;
3686 trueop0 = avoid_constant_pool_reference (op0);
3687 trueop1 = avoid_constant_pool_reference (op1);
3688 return simplify_relational_operation_1 (code, mode, cmp_mode,
3689 trueop0, trueop1);
3692 /* This part of simplify_relational_operation is only used when CMP_MODE
3693 is not in class MODE_CC (i.e. it is a real comparison).
3695 MODE is the mode of the result, while CMP_MODE specifies the mode
3696 in which the comparison is done, so it is the mode of the operands. */
3698 static rtx
3699 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3700 enum machine_mode cmp_mode, rtx op0, rtx op1)
3702 enum rtx_code op0code = GET_CODE (op0);
3704 if (op1 == const0_rtx && COMPARISON_P (op0))
3706 /* If op0 is a comparison, extract the comparison arguments
3707 from it. */
3708 if (code == NE)
3710 if (GET_MODE (op0) == mode)
3711 return simplify_rtx (op0);
3712 else
3713 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3714 XEXP (op0, 0), XEXP (op0, 1));
3716 else if (code == EQ)
3718 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3719 if (new_code != UNKNOWN)
3720 return simplify_gen_relational (new_code, mode, VOIDmode,
3721 XEXP (op0, 0), XEXP (op0, 1));
3725 if (op1 == const0_rtx)
3727 /* Canonicalize (GTU x 0) as (NE x 0). */
3728 if (code == GTU)
3729 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3730 /* Canonicalize (LEU x 0) as (EQ x 0). */
3731 if (code == LEU)
3732 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3734 else if (op1 == const1_rtx)
3736 switch (code)
3738 case GE:
3739 /* Canonicalize (GE x 1) as (GT x 0). */
3740 return simplify_gen_relational (GT, mode, cmp_mode,
3741 op0, const0_rtx);
3742 case GEU:
3743 /* Canonicalize (GEU x 1) as (NE x 0). */
3744 return simplify_gen_relational (NE, mode, cmp_mode,
3745 op0, const0_rtx);
3746 case LT:
3747 /* Canonicalize (LT x 1) as (LE x 0). */
3748 return simplify_gen_relational (LE, mode, cmp_mode,
3749 op0, const0_rtx);
3750 case LTU:
3751 /* Canonicalize (LTU x 1) as (EQ x 0). */
3752 return simplify_gen_relational (EQ, mode, cmp_mode,
3753 op0, const0_rtx);
3754 default:
3755 break;
3758 else if (op1 == constm1_rtx)
3760 /* Canonicalize (LE x -1) as (LT x 0). */
3761 if (code == LE)
3762 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3763 /* Canonicalize (GT x -1) as (GE x 0). */
3764 if (code == GT)
3765 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3768 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
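/* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes (eq x (const_int 7)). */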
3769 if ((code == EQ || code == NE)
3770 && (op0code == PLUS || op0code == MINUS)
3771 && CONSTANT_P (op1)
3772 && CONSTANT_P (XEXP (op0, 1))
3773 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3775 rtx x = XEXP (op0, 0);
3776 rtx c = XEXP (op0, 1);
3778 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3779 cmp_mode, op1, c);
3780 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3783 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3784 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3785 if (code == NE
3786 && op1 == const0_rtx
3787 && GET_MODE_CLASS (mode) == MODE_INT
3788 && cmp_mode != VOIDmode
3789 /* ??? Work-around BImode bugs in the ia64 backend. */
3790 && mode != BImode
3791 && cmp_mode != BImode
3792 && nonzero_bits (op0, cmp_mode) == 1
3793 && STORE_FLAG_VALUE == 1)
3794 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3795 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3796 : lowpart_subreg (mode, op0, cmp_mode);
3798 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3799 if ((code == EQ || code == NE)
3800 && op1 == const0_rtx
3801 && op0code == XOR)
3802 return simplify_gen_relational (code, mode, cmp_mode,
3803 XEXP (op0, 0), XEXP (op0, 1));
3805 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3806 if ((code == EQ || code == NE)
3807 && op0code == XOR
3808 && rtx_equal_p (XEXP (op0, 0), op1)
3809 && !side_effects_p (XEXP (op0, 0)))
3810 return simplify_gen_relational (code, mode, cmp_mode,
3811 XEXP (op0, 1), const0_rtx);
3813 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3814 if ((code == EQ || code == NE)
3815 && op0code == XOR
3816 && rtx_equal_p (XEXP (op0, 1), op1)
3817 && !side_effects_p (XEXP (op0, 1)))
3818 return simplify_gen_relational (code, mode, cmp_mode,
3819 XEXP (op0, 0), const0_rtx);
3821 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3822 if ((code == EQ || code == NE)
3823 && op0code == XOR
3824 && (GET_CODE (op1) == CONST_INT
3825 || GET_CODE (op1) == CONST_DOUBLE)
3826 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3827 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3828 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3829 simplify_gen_binary (XOR, cmp_mode,
3830 XEXP (op0, 1), op1));
3832 if (op0code == POPCOUNT && op1 == const0_rtx)
3833 switch (code)
3835 case EQ:
3836 case LE:
3837 case LEU:
3838 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3839 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3840 XEXP (op0, 0), const0_rtx);
3842 case NE:
3843 case GT:
3844 case GTU:
3845 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3846 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3847 XEXP (op0, 0), const0_rtx);
3849 default:
3850 break;
3853 return NULL_RTX;
3856 /* Check if the given comparison (done in the given MODE) is actually a
3857 tautology or a contradiction.
3858 If no simplification is possible, this function returns zero.
3859 Otherwise, it returns either const_true_rtx or const0_rtx. */
3862 simplify_const_relational_operation (enum rtx_code code,
3863 enum machine_mode mode,
3864 rtx op0, rtx op1)
3866 int equal, op0lt, op0ltu, op1lt, op1ltu;
3867 rtx tem;
3868 rtx trueop0;
3869 rtx trueop1;
3871 gcc_assert (mode != VOIDmode
3872 || (GET_MODE (op0) == VOIDmode
3873 && GET_MODE (op1) == VOIDmode));
3875 /* If op0 is a compare, extract the comparison arguments from it. */
3876 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3878 op1 = XEXP (op0, 1);
3879 op0 = XEXP (op0, 0);
3881 if (GET_MODE (op0) != VOIDmode)
3882 mode = GET_MODE (op0);
3883 else if (GET_MODE (op1) != VOIDmode)
3884 mode = GET_MODE (op1);
3885 else
3886 return 0;
3889 /* We can't simplify MODE_CC values since we don't know what the
3890 actual comparison is. */
3891 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3892 return 0;
3894 /* Make sure the constant is second. */
3895 if (swap_commutative_operands_p (op0, op1))
3897 tem = op0, op0 = op1, op1 = tem;
3898 code = swap_condition (code);
3901 trueop0 = avoid_constant_pool_reference (op0);
3902 trueop1 = avoid_constant_pool_reference (op1);
3904 /* For integer comparisons of A and B maybe we can simplify A - B and can
3905 then simplify a comparison of that with zero. If A and B are both either
3906 a register or a CONST_INT, this can't help; testing for these cases will
3907 prevent infinite recursion here and speed things up.
3909 We can only do this for EQ and NE comparisons as otherwise we may
3910 lose or introduce overflow which we cannot disregard as undefined as
3911 we do not know the signedness of the operation on either the left or
3912 the right hand side of the comparison. */
3914 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3915 && (code == EQ || code == NE)
3916 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3917 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3918 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3919 /* We cannot do this if tem is a nonzero address. */
3920 && ! nonzero_address_p (tem))
3921 return simplify_const_relational_operation (signed_condition (code),
3922 mode, tem, const0_rtx);
3924 if (! HONOR_NANS (mode) && code == ORDERED)
3925 return const_true_rtx;
3927 if (! HONOR_NANS (mode) && code == UNORDERED)
3928 return const0_rtx;
3930 /* For modes without NaNs, if the two operands are equal, we know the
3931 result except if they have side-effects. */
3932 if (! HONOR_NANS (GET_MODE (trueop0))
3933 && rtx_equal_p (trueop0, trueop1)
3934 && ! side_effects_p (trueop0))
3935 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3937 /* If the operands are floating-point constants, see if we can fold
3938 the result. */
3939 else if (GET_CODE (trueop0) == CONST_DOUBLE
3940 && GET_CODE (trueop1) == CONST_DOUBLE
3941 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3943 REAL_VALUE_TYPE d0, d1;
3945 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3946 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3948 /* Comparisons are unordered iff at least one of the values is NaN. */
3949 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3950 switch (code)
3952 case UNEQ:
3953 case UNLT:
3954 case UNGT:
3955 case UNLE:
3956 case UNGE:
3957 case NE:
3958 case UNORDERED:
3959 return const_true_rtx;
3960 case EQ:
3961 case LT:
3962 case GT:
3963 case LE:
3964 case GE:
3965 case LTGT:
3966 case ORDERED:
3967 return const0_rtx;
3968 default:
3969 return 0;
3972 equal = REAL_VALUES_EQUAL (d0, d1);
3973 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3974 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3977 /* Otherwise, see if the operands are both integers. */
3978 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3979 && (GET_CODE (trueop0) == CONST_DOUBLE
3980 || GET_CODE (trueop0) == CONST_INT)
3981 && (GET_CODE (trueop1) == CONST_DOUBLE
3982 || GET_CODE (trueop1) == CONST_INT))
3984 int width = GET_MODE_BITSIZE (mode);
3985 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3986 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3988 /* Get the two words comprising each integer constant. */
3989 if (GET_CODE (trueop0) == CONST_DOUBLE)
3991 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3992 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3994 else
3996 l0u = l0s = INTVAL (trueop0);
3997 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4000 if (GET_CODE (trueop1) == CONST_DOUBLE)
4002 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4003 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4005 else
4007 l1u = l1s = INTVAL (trueop1);
4008 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4011 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4012 we have to sign or zero-extend the values. */
4013 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4015 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4016 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4018 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4019 l0s |= ((HOST_WIDE_INT) (-1) << width);
4021 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4022 l1s |= ((HOST_WIDE_INT) (-1) << width);
4024 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4025 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4027 equal = (h0u == h1u && l0u == l1u);
4028 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4029 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4030 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4031 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
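/* As an example of the signed/unsigned split above: for the QImode
constants 0xff and 0x01, op0ltu is false (255 is not below 1 unsigned),
while op0lt is true, because 0xff sign-extends to -1. */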
4034 /* Otherwise, there are some code-specific tests we can make. */
4035 else
4037 /* Optimize comparisons with upper and lower bounds. */
4038 if (SCALAR_INT_MODE_P (mode)
4039 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4041 rtx mmin, mmax;
4042 int sign;
4044 if (code == GEU
4045 || code == LEU
4046 || code == GTU
4047 || code == LTU)
4048 sign = 0;
4049 else
4050 sign = 1;
4052 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4054 tem = NULL_RTX;
4055 switch (code)
4057 case GEU:
4058 case GE:
4059 /* x >= min is always true. */
4060 if (rtx_equal_p (trueop1, mmin))
4061 tem = const_true_rtx;
4062 else
4063 break;
4065 case LEU:
4066 case LE:
4067 /* x <= max is always true. */
4068 if (rtx_equal_p (trueop1, mmax))
4069 tem = const_true_rtx;
4070 break;
4072 case GTU:
4073 case GT:
4074 /* x > max is always false. */
4075 if (rtx_equal_p (trueop1, mmax))
4076 tem = const0_rtx;
4077 break;
4079 case LTU:
4080 case LT:
4081 /* x < min is always false. */
4082 if (rtx_equal_p (trueop1, mmin))
4083 tem = const0_rtx;
4084 break;
4086 default:
4087 break;
4089 if (tem == const0_rtx
4090 || tem == const_true_rtx)
4091 return tem;
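/* A concrete instance of the bounds check: (geu:SI (reg X) (const_int 0))
always holds, since 0 is the minimum unsigned SImode value, so TEM is set
to const_true_rtx and returned above. */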
4094 switch (code)
4096 case EQ:
4097 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4098 return const0_rtx;
4099 break;
4101 case NE:
4102 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4103 return const_true_rtx;
4104 break;
4106 case LT:
4107 /* Optimize abs(x) < 0.0. */
4108 if (trueop1 == CONST0_RTX (mode)
4109 && !HONOR_SNANS (mode)
4110 && (!INTEGRAL_MODE_P (mode)
4111 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4113 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4114 : trueop0;
4115 if (GET_CODE (tem) == ABS)
4117 if (INTEGRAL_MODE_P (mode)
4118 && (issue_strict_overflow_warning
4119 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4120 warning (OPT_Wstrict_overflow,
4121 ("assuming signed overflow does not occur when "
4122 "assuming abs (x) < 0 is false"));
4123 return const0_rtx;
4127 /* Optimize popcount (x) < 0. */
4128 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4129 return const0_rtx;
4130 break;
4132 case GE:
4133 /* Optimize abs(x) >= 0.0. */
4134 if (trueop1 == CONST0_RTX (mode)
4135 && !HONOR_NANS (mode)
4136 && (!INTEGRAL_MODE_P (mode)
4137 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4139 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4140 : trueop0;
4141 if (GET_CODE (tem) == ABS)
4143 if (INTEGRAL_MODE_P (mode)
4144 && (issue_strict_overflow_warning
4145 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4146 warning (OPT_Wstrict_overflow,
4147 ("assuming signed overflow does not occur when "
4148 "assuming abs (x) >= 0 is true"));
4149 return const_true_rtx;
4153 /* Optimize popcount (x) >= 0. */
4154 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4155 return const_true_rtx;
4156 break;
4158 case UNGE:
4159 /* Optimize ! (abs(x) < 0.0). */
4160 if (trueop1 == CONST0_RTX (mode))
4162 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4163 : trueop0;
4164 if (GET_CODE (tem) == ABS)
4165 return const_true_rtx;
4167 break;
4169 default:
4170 break;
4173 return 0;
4176 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4177 as appropriate. */
4178 switch (code)
4180 case EQ:
4181 case UNEQ:
4182 return equal ? const_true_rtx : const0_rtx;
4183 case NE:
4184 case LTGT:
4185 return ! equal ? const_true_rtx : const0_rtx;
4186 case LT:
4187 case UNLT:
4188 return op0lt ? const_true_rtx : const0_rtx;
4189 case GT:
4190 case UNGT:
4191 return op1lt ? const_true_rtx : const0_rtx;
4192 case LTU:
4193 return op0ltu ? const_true_rtx : const0_rtx;
4194 case GTU:
4195 return op1ltu ? const_true_rtx : const0_rtx;
4196 case LE:
4197 case UNLE:
4198 return equal || op0lt ? const_true_rtx : const0_rtx;
4199 case GE:
4200 case UNGE:
4201 return equal || op1lt ? const_true_rtx : const0_rtx;
4202 case LEU:
4203 return equal || op0ltu ? const_true_rtx : const0_rtx;
4204 case GEU:
4205 return equal || op1ltu ? const_true_rtx : const0_rtx;
4206 case ORDERED:
4207 return const_true_rtx;
4208 case UNORDERED:
4209 return const0_rtx;
4210 default:
4211 gcc_unreachable ();
4215 /* Simplify CODE, an operation with result mode MODE and three operands,
4216 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4217 a constant. Return 0 if no simplification is possible. */
4220 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4221 enum machine_mode op0_mode, rtx op0, rtx op1,
4222 rtx op2)
4224 unsigned int width = GET_MODE_BITSIZE (mode);
4226 /* VOIDmode means "infinite" precision. */
4227 if (width == 0)
4228 width = HOST_BITS_PER_WIDE_INT;
4230 switch (code)
4232 case SIGN_EXTRACT:
4233 case ZERO_EXTRACT:
4234 if (GET_CODE (op0) == CONST_INT
4235 && GET_CODE (op1) == CONST_INT
4236 && GET_CODE (op2) == CONST_INT
4237 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4238 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4240 /* Extracting a bit-field from a constant */
4241 HOST_WIDE_INT val = INTVAL (op0);
4243 if (BITS_BIG_ENDIAN)
4244 val >>= (GET_MODE_BITSIZE (op0_mode)
4245 - INTVAL (op2) - INTVAL (op1));
4246 else
4247 val >>= INTVAL (op2);
4249 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4251 /* First zero-extend. */
4252 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4253 /* If desired, propagate sign bit. */
4254 if (code == SIGN_EXTRACT
4255 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4256 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4259 /* Clear the bits that don't belong in our mode,
4260 unless they and our sign bit are all one.
4261 So we get either a reasonable negative value or a reasonable
4262 unsigned value for this mode. */
4263 if (width < HOST_BITS_PER_WIDE_INT
4264 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4265 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4266 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4268 return gen_int_mode (val, mode);
4270 break;
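/* Worked example for the extraction above, assuming BITS_BIG_ENDIAN is 0:
(zero_extract:SI (const_int 0x1234) (const_int 8) (const_int 4)) shifts
the constant right by 4 and masks it to 8 bits, giving (const_int 0x23). */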
4272 case IF_THEN_ELSE:
4273 if (GET_CODE (op0) == CONST_INT)
4274 return op0 != const0_rtx ? op1 : op2;
4276 /* Convert c ? a : a into "a". */
4277 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4278 return op1;
4280 /* Convert a != b ? a : b into "a". */
4281 if (GET_CODE (op0) == NE
4282 && ! side_effects_p (op0)
4283 && ! HONOR_NANS (mode)
4284 && ! HONOR_SIGNED_ZEROS (mode)
4285 && ((rtx_equal_p (XEXP (op0, 0), op1)
4286 && rtx_equal_p (XEXP (op0, 1), op2))
4287 || (rtx_equal_p (XEXP (op0, 0), op2)
4288 && rtx_equal_p (XEXP (op0, 1), op1))))
4289 return op1;
4291 /* Convert a == b ? a : b into "b". */
4292 if (GET_CODE (op0) == EQ
4293 && ! side_effects_p (op0)
4294 && ! HONOR_NANS (mode)
4295 && ! HONOR_SIGNED_ZEROS (mode)
4296 && ((rtx_equal_p (XEXP (op0, 0), op1)
4297 && rtx_equal_p (XEXP (op0, 1), op2))
4298 || (rtx_equal_p (XEXP (op0, 0), op2)
4299 && rtx_equal_p (XEXP (op0, 1), op1))))
4300 return op2;
4302 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4304 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4305 ? GET_MODE (XEXP (op0, 1))
4306 : GET_MODE (XEXP (op0, 0)));
4307 rtx temp;
4309 /* Look for happy constants in op1 and op2. */
4310 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4312 HOST_WIDE_INT t = INTVAL (op1);
4313 HOST_WIDE_INT f = INTVAL (op2);
4315 if (t == STORE_FLAG_VALUE && f == 0)
4316 code = GET_CODE (op0);
4317 else if (t == 0 && f == STORE_FLAG_VALUE)
4319 enum rtx_code tmp;
4320 tmp = reversed_comparison_code (op0, NULL_RTX);
4321 if (tmp == UNKNOWN)
4322 break;
4323 code = tmp;
4325 else
4326 break;
4328 return simplify_gen_relational (code, mode, cmp_mode,
4329 XEXP (op0, 0), XEXP (op0, 1));
4332 if (cmp_mode == VOIDmode)
4333 cmp_mode = op0_mode;
4334 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4335 cmp_mode, XEXP (op0, 0),
4336 XEXP (op0, 1));
4338 /* See if any simplifications were possible. */
4339 if (temp)
4341 if (GET_CODE (temp) == CONST_INT)
4342 return temp == const0_rtx ? op2 : op1;
4343 else if (temp)
4344 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4347 break;
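/* Example of the STORE_FLAG_VALUE folding above: on a target where
STORE_FLAG_VALUE is 1, (if_then_else (lt:SI X Y) (const_int 1)
(const_int 0)) simplifies to the comparison (lt X Y) itself. */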
4349 case VEC_MERGE:
4350 gcc_assert (GET_MODE (op0) == mode);
4351 gcc_assert (GET_MODE (op1) == mode);
4352 gcc_assert (VECTOR_MODE_P (mode));
4353 op2 = avoid_constant_pool_reference (op2);
4354 if (GET_CODE (op2) == CONST_INT)
4356 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4357 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4358 int mask = (1 << n_elts) - 1;
4360 if (!(INTVAL (op2) & mask))
4361 return op1;
4362 if ((INTVAL (op2) & mask) == mask)
4363 return op0;
4365 op0 = avoid_constant_pool_reference (op0);
4366 op1 = avoid_constant_pool_reference (op1);
4367 if (GET_CODE (op0) == CONST_VECTOR
4368 && GET_CODE (op1) == CONST_VECTOR)
4370 rtvec v = rtvec_alloc (n_elts);
4371 unsigned int i;
4373 for (i = 0; i < n_elts; i++)
4374 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4375 ? CONST_VECTOR_ELT (op0, i)
4376 : CONST_VECTOR_ELT (op1, i));
4377 return gen_rtx_CONST_VECTOR (mode, v);
4380 break;
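/* Example of the VEC_MERGE selection above: in V4SImode, a mask of
(const_int 5) (binary 0101) takes elements 0 and 2 from OP0 and elements
1 and 3 from OP1, while masks of 0 and 0xf return OP1 and OP0 whole. */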
4382 default:
4383 gcc_unreachable ();
4386 return 0;
4389 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4390 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4392 Works by unpacking OP into a collection of 8-bit values
4393 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4394 and then repacking them again for OUTERMODE. */
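/* For example, on a little-endian target the lowpart subreg
(subreg:QI (const_int 0x12345678) 0) of an SImode constant unpacks the
constant into bytes, selects byte 0, and repacks it as (const_int 0x78). */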
4396 static rtx
4397 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4398 enum machine_mode innermode, unsigned int byte)
4400 /* We support up to 512-bit values (for V8DFmode). */
4401 enum {
4402 max_bitsize = 512,
4403 value_bit = 8,
4404 value_mask = (1 << value_bit) - 1
4406 unsigned char value[max_bitsize / value_bit];
4407 int value_start;
4408 int i;
4409 int elem;
4411 int num_elem;
4412 rtx * elems;
4413 int elem_bitsize;
4414 rtx result_s;
4415 rtvec result_v = NULL;
4416 enum mode_class outer_class;
4417 enum machine_mode outer_submode;
4419 /* Some ports misuse CCmode. */
4420 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4421 return op;
4423 /* We have no way to represent a complex constant at the rtl level. */
4424 if (COMPLEX_MODE_P (outermode))
4425 return NULL_RTX;
4427 /* Unpack the value. */
4429 if (GET_CODE (op) == CONST_VECTOR)
4431 num_elem = CONST_VECTOR_NUNITS (op);
4432 elems = &CONST_VECTOR_ELT (op, 0);
4433 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4435 else
4437 num_elem = 1;
4438 elems = &op;
4439 elem_bitsize = max_bitsize;
4441 /* If this asserts, it is too complicated; reducing value_bit may help. */
4442 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4443 /* I don't know how to handle endianness of sub-units. */
4444 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4446 for (elem = 0; elem < num_elem; elem++)
4448 unsigned char * vp;
4449 rtx el = elems[elem];
4451 /* Vectors are kept in target memory order. (This is probably
4452 a mistake.) */
4454 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4455 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4456 / BITS_PER_UNIT);
4457 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4458 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4459 unsigned bytele = (subword_byte % UNITS_PER_WORD
4460 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4461 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4464 switch (GET_CODE (el))
4466 case CONST_INT:
4467 for (i = 0;
4468 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4469 i += value_bit)
4470 *vp++ = INTVAL (el) >> i;
4471 /* CONST_INTs are always logically sign-extended. */
4472 for (; i < elem_bitsize; i += value_bit)
4473 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4474 break;
4476 case CONST_DOUBLE:
4477 if (GET_MODE (el) == VOIDmode)
4479 /* If this triggers, someone should have generated a
4480 CONST_INT instead. */
4481 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4483 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4484 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4485 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4487 *vp++
4488 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4489 i += value_bit;
4491 /* It shouldn't matter what's done here, so fill it with
4492 zero. */
4493 for (; i < elem_bitsize; i += value_bit)
4494 *vp++ = 0;
4496 else
4498 long tmp[max_bitsize / 32];
4499 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4501 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4502 gcc_assert (bitsize <= elem_bitsize);
4503 gcc_assert (bitsize % value_bit == 0);
4505 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4506 GET_MODE (el));
4508 /* real_to_target produces its result in words affected by
4509 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4510 and use WORDS_BIG_ENDIAN instead; see the documentation
4511 of SUBREG in rtl.texi. */
4512 for (i = 0; i < bitsize; i += value_bit)
4514 int ibase;
4515 if (WORDS_BIG_ENDIAN)
4516 ibase = bitsize - 1 - i;
4517 else
4518 ibase = i;
4519 *vp++ = tmp[ibase / 32] >> i % 32;
4522 /* It shouldn't matter what's done here, so fill it with
4523 zero. */
4524 for (; i < elem_bitsize; i += value_bit)
4525 *vp++ = 0;
4527 break;
4529 default:
4530 gcc_unreachable ();
4534 /* Now, pick the right byte to start with. */
4535 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4536 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4537 will already have offset 0. */
4538 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4540 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4541 - byte);
4542 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4543 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4544 byte = (subword_byte % UNITS_PER_WORD
4545 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4548 /* BYTE should still be inside OP. (Note that BYTE is unsigned, so if
4549 the adjustment above would have made it negative, it is instead very large.) */
4550 gcc_assert (byte < GET_MODE_SIZE (innermode));
4552 /* Convert from bytes to chunks of size value_bit. */
4553 value_start = byte * (BITS_PER_UNIT / value_bit);
4555 /* Re-pack the value. */
4557 if (VECTOR_MODE_P (outermode))
4559 num_elem = GET_MODE_NUNITS (outermode);
4560 result_v = rtvec_alloc (num_elem);
4561 elems = &RTVEC_ELT (result_v, 0);
4562 outer_submode = GET_MODE_INNER (outermode);
4564 else
4566 num_elem = 1;
4567 elems = &result_s;
4568 outer_submode = outermode;
4571 outer_class = GET_MODE_CLASS (outer_submode);
4572 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4574 gcc_assert (elem_bitsize % value_bit == 0);
4575 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4577 for (elem = 0; elem < num_elem; elem++)
4579 unsigned char *vp;
4581 /* Vectors are stored in target memory order. (This is probably
4582 a mistake.) */
4584 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4585 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4586 / BITS_PER_UNIT);
4587 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4588 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4589 unsigned bytele = (subword_byte % UNITS_PER_WORD
4590 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4591 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4594 switch (outer_class)
4596 case MODE_INT:
4597 case MODE_PARTIAL_INT:
4599 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4601 for (i = 0;
4602 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4603 i += value_bit)
4604 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4605 for (; i < elem_bitsize; i += value_bit)
4606 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4607 << (i - HOST_BITS_PER_WIDE_INT));
4609 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4610 know why. */
4611 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4612 elems[elem] = gen_int_mode (lo, outer_submode);
4613 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4614 elems[elem] = immed_double_const (lo, hi, outer_submode);
4615 else
4616 return NULL_RTX;
4618 break;
4620 case MODE_FLOAT:
4621 case MODE_DECIMAL_FLOAT:
4623 REAL_VALUE_TYPE r;
4624 long tmp[max_bitsize / 32];
4626 /* real_from_target wants its input in words affected by
4627 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4628 and use WORDS_BIG_ENDIAN instead; see the documentation
4629 of SUBREG in rtl.texi. */
4630 for (i = 0; i < max_bitsize / 32; i++)
4631 tmp[i] = 0;
4632 for (i = 0; i < elem_bitsize; i += value_bit)
4634 int ibase;
4635 if (WORDS_BIG_ENDIAN)
4636 ibase = elem_bitsize - 1 - i;
4637 else
4638 ibase = i;
4639 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4642 real_from_target (&r, tmp, outer_submode);
4643 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4645 break;
4647 default:
4648 gcc_unreachable ();
4651 if (VECTOR_MODE_P (outermode))
4652 return gen_rtx_CONST_VECTOR (outermode, result_v);
4653 else
4654 return result_s;
4657 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4658 Return 0 if no simplifications are possible. */
4660 simplify_subreg (enum machine_mode outermode, rtx op,
4661 enum machine_mode innermode, unsigned int byte)
4663 /* A little bit of sanity checking. */
4664 gcc_assert (innermode != VOIDmode);
4665 gcc_assert (outermode != VOIDmode);
4666 gcc_assert (innermode != BLKmode);
4667 gcc_assert (outermode != BLKmode);
4669 gcc_assert (GET_MODE (op) == innermode
4670 || GET_MODE (op) == VOIDmode);
4672 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4673 gcc_assert (byte < GET_MODE_SIZE (innermode));
4675 if (outermode == innermode && !byte)
4676 return op;
4678 if (GET_CODE (op) == CONST_INT
4679 || GET_CODE (op) == CONST_DOUBLE
4680 || GET_CODE (op) == CONST_VECTOR)
4681 return simplify_immed_subreg (outermode, op, innermode, byte);
4683 /* Changing mode twice with SUBREG => just change it once,
4684 or not at all if changing back to op's starting mode. */
4685 if (GET_CODE (op) == SUBREG)
4687 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4688 int final_offset = byte + SUBREG_BYTE (op);
4689 rtx newx;
4691 if (outermode == innermostmode
4692 && byte == 0 && SUBREG_BYTE (op) == 0)
4693 return SUBREG_REG (op);
4695 /* The SUBREG_BYTE represents the offset, as if the value were stored
4696 in memory. The irritating exception is the paradoxical subreg, where
4697 we define SUBREG_BYTE to be 0. On big endian machines, this value
4698 should really be negative. For a moment, undo this exception. */
4699 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4701 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4702 if (WORDS_BIG_ENDIAN)
4703 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4704 if (BYTES_BIG_ENDIAN)
4705 final_offset += difference % UNITS_PER_WORD;
4707 if (SUBREG_BYTE (op) == 0
4708 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4710 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4711 if (WORDS_BIG_ENDIAN)
4712 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4713 if (BYTES_BIG_ENDIAN)
4714 final_offset += difference % UNITS_PER_WORD;
4717 /* See whether resulting subreg will be paradoxical. */
4718 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4720 /* In nonparadoxical subregs we can't handle negative offsets. */
4721 if (final_offset < 0)
4722 return NULL_RTX;
4723 /* Bail out in case resulting subreg would be incorrect. */
4724 if (final_offset % GET_MODE_SIZE (outermode)
4725 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4726 return NULL_RTX;
4728 else
4730 int offset = 0;
4731 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4733 /* In a paradoxical subreg, see if we are still looking at the lower
4734 part. If so, our SUBREG_BYTE will be 0. */
4735 if (WORDS_BIG_ENDIAN)
4736 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4737 if (BYTES_BIG_ENDIAN)
4738 offset += difference % UNITS_PER_WORD;
4739 if (offset == final_offset)
4740 final_offset = 0;
4741 else
4742 return NULL_RTX;
4745 /* Recurse for further possible simplifications. */
4746 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4747 final_offset);
4748 if (newx)
4749 return newx;
4750 if (validate_subreg (outermode, innermostmode,
4751 SUBREG_REG (op), final_offset))
4752 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4753 return NULL_RTX;
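/* A simple instance of the double-SUBREG collapse handled above:
(subreg:QI (subreg:HI (reg:SI X) 0) 0) becomes a single
(subreg:QI (reg:SI X) 0), assuming that subreg is still valid. */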
4756 /* Merge implicit and explicit truncations. */
4758 if (GET_CODE (op) == TRUNCATE
4759 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4760 && subreg_lowpart_offset (outermode, innermode) == byte)
4761 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4762 GET_MODE (XEXP (op, 0)));
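/* For instance, taking the lowpart QImode subreg of
(truncate:HI (reg:SI X)) merges the two truncations into a single
(truncate:QI (reg:SI X)). */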
4764 /* SUBREG of a hard register => just change the register number
4765 and/or mode. If the hard register is not valid in that mode,
4766 suppress this simplification. If the hard register is the stack,
4767 frame, or argument pointer, leave this as a SUBREG. */
4769 if (REG_P (op)
4770 && REGNO (op) < FIRST_PSEUDO_REGISTER
4771 #ifdef CANNOT_CHANGE_MODE_CLASS
4772 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4773 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4774 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4775 #endif
4776 && ((reload_completed && !frame_pointer_needed)
4777 || (REGNO (op) != FRAME_POINTER_REGNUM
4778 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4779 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4780 #endif
4782 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4783 && REGNO (op) != ARG_POINTER_REGNUM
4784 #endif
4785 && REGNO (op) != STACK_POINTER_REGNUM
4786 && subreg_offset_representable_p (REGNO (op), innermode,
4787 byte, outermode))
4789 unsigned int regno = REGNO (op);
4790 unsigned int final_regno
4791 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4793 /* ??? We do allow it if the current REG is not valid for
4794 its mode. This is a kludge to work around how float/complex
4795 arguments are passed on 32-bit SPARC and should be fixed. */
4796 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4797 || ! HARD_REGNO_MODE_OK (regno, innermode))
4799 rtx x;
4800 int final_offset = byte;
4802 /* Adjust offset for paradoxical subregs. */
4803 if (byte == 0
4804 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4806 int difference = (GET_MODE_SIZE (innermode)
4807 - GET_MODE_SIZE (outermode));
4808 if (WORDS_BIG_ENDIAN)
4809 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4810 if (BYTES_BIG_ENDIAN)
4811 final_offset += difference % UNITS_PER_WORD;
4814 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4816 /* Propagate the original regno. We don't have any way to specify
4817 the offset inside the original regno, so do so only for the lowpart.
4818 The information is used only by alias analysis, which cannot
4819 grok a partial register anyway. */
4821 if (subreg_lowpart_offset (outermode, innermode) == byte)
4822 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4823 return x;
4827 /* If we have a SUBREG of a register that we are replacing and we are
4828 replacing it with a MEM, make a new MEM and try replacing the
4829 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4830 or if we would be widening it. */
4832 if (MEM_P (op)
4833 && ! mode_dependent_address_p (XEXP (op, 0))
4834 /* Allow splitting of volatile memory references in case we don't
4835 have an instruction to move the whole thing. */
4836 && (! MEM_VOLATILE_P (op)
4837 || ! have_insn_for (SET, innermode))
4838 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4839 return adjust_address_nv (op, outermode, byte);
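/* For example, (subreg:SI (mem:DI ADDR) 4) can become a narrower
(mem:SI ...) at byte offset 4 via adjust_address_nv, provided ADDR is not
mode-dependent and the access is not being widened. */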
4841 /* Handle complex values represented as CONCAT
4842 of real and imaginary part. */
4843 if (GET_CODE (op) == CONCAT)
4845 unsigned int part_size, final_offset;
4846 rtx part, res;
4848 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4849 if (byte < part_size)
4851 part = XEXP (op, 0);
4852 final_offset = byte;
4854 else
4856 part = XEXP (op, 1);
4857 final_offset = byte - part_size;
4860 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4861 return NULL_RTX;
4863 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4864 if (res)
4865 return res;
4866 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4867 return gen_rtx_SUBREG (outermode, part, final_offset);
4868 return NULL_RTX;
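/* Example of the CONCAT handling above: for a complex value
(concat:SC RE IM), a subreg whose BYTE equals the size of SFmode selects
the imaginary part IM, while BYTE 0 selects RE. */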
4871 /* Optimize SUBREG truncations of zero and sign extended values. */
4872 if ((GET_CODE (op) == ZERO_EXTEND
4873 || GET_CODE (op) == SIGN_EXTEND)
4874 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4876 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4878 /* If we're requesting the lowpart of a zero or sign extension,
4879 there are three possibilities. If the outermode is the same
4880 as the origmode, we can omit both the extension and the subreg.
4881 If the outermode is not larger than the origmode, we can apply
4882 the truncation without the extension. Finally, if the outermode
4883 is larger than the origmode, but both are integer modes, we
4884 can just extend to the appropriate mode. */
4885 if (bitpos == 0)
4887 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4888 if (outermode == origmode)
4889 return XEXP (op, 0);
4890 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4891 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4892 subreg_lowpart_offset (outermode,
4893 origmode));
4894 if (SCALAR_INT_MODE_P (outermode))
4895 return simplify_gen_unary (GET_CODE (op), outermode,
4896 XEXP (op, 0), origmode);
4899 /* A SUBREG resulting from a zero extension may fold to zero if
4900 it extracts higher bits than the ZERO_EXTEND's source provides. */
4901 if (GET_CODE (op) == ZERO_EXTEND
4902 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4903 return CONST0_RTX (outermode);
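/* Two instances of the extension folds above: the lowpart HImode subreg
of (zero_extend:SI (reg:HI X)) is simply (reg:HI X), while a subreg that
reads only the zero-filled upper bits folds to CONST0_RTX. */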
4906 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4907 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4908 the outer subreg is effectively a truncation to the original mode. */
4909 if ((GET_CODE (op) == LSHIFTRT
4910 || GET_CODE (op) == ASHIFTRT)
4911 && SCALAR_INT_MODE_P (outermode)
4912 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
4913 to avoid the possibility that an outer LSHIFTRT shifts by more
4914 than the sign extension's sign_bit_copies and introduces zeros
4915 into the high bits of the result. */
4916 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4917 && GET_CODE (XEXP (op, 1)) == CONST_INT
4918 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4920 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4921 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4922 return simplify_gen_binary (ASHIFTRT, outermode,
4923 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4925 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4926 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4927 the outer subreg is effectively a truncation to the original mode. */
4928 if ((GET_CODE (op) == LSHIFTRT
4929 || GET_CODE (op) == ASHIFTRT)
4930 && SCALAR_INT_MODE_P (outermode)
4931 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4932 && GET_CODE (XEXP (op, 1)) == CONST_INT
4933 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4934 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4935 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4936 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4937 return simplify_gen_binary (LSHIFTRT, outermode,
4938 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4940 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4941 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4942 the outer subreg is effectively a truncation to the original mode. */
4943 if (GET_CODE (op) == ASHIFT
4944 && SCALAR_INT_MODE_P (outermode)
4945 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4946 && GET_CODE (XEXP (op, 1)) == CONST_INT
4947 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4948 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4949 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4950 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4951 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4952 return simplify_gen_binary (ASHIFT, outermode,
4953 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4955 return NULL_RTX;
4958 /* Make a SUBREG operation or equivalent if it folds. */
4961 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4962 enum machine_mode innermode, unsigned int byte)
4964 rtx newx;
4966 newx = simplify_subreg (outermode, op, innermode, byte);
4967 if (newx)
4968 return newx;
4970 if (GET_CODE (op) == SUBREG
4971 || GET_CODE (op) == CONCAT
4972 || GET_MODE (op) == VOIDmode)
4973 return NULL_RTX;
4975 if (validate_subreg (outermode, innermode, op, byte))
4976 return gen_rtx_SUBREG (outermode, op, byte);
4978 return NULL_RTX;
4981 /* Simplify X, an rtx expression.
4983 Return the simplified expression or NULL if no simplifications
4984 were possible.
4986 This is the preferred entry point into the simplification routines;
4987 however, we still allow passes to call the more specific routines.
4989 Right now GCC has three (yes, three) major bodies of RTL simplification
4990 code that need to be unified.
4992 1. fold_rtx in cse.c. This code uses various CSE specific
4993 information to aid in RTL simplification.
4995 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4996 it uses combine specific information to aid in RTL
4997 simplification.
4999 3. The routines in this file.
5002 Long term we want to only have one body of simplification code; to
5003 get to that state I recommend the following steps:
5005 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5006 which are not pass dependent state into these routines.
5008 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5009 use this routine whenever possible.
5011 3. Allow for pass dependent state to be provided to these
5012 routines and add simplifications based on the pass dependent
5013 state. Remove code from cse.c & combine.c that becomes
5014 redundant/dead.
5016 It will take time, but ultimately the compiler will be easier to
5017 maintain and improve. It's totally silly that when we add a
5018 simplification it needs to be added to 4 places (3 for RTL
5019 simplification and 1 for tree simplification). */
5022 simplify_rtx (rtx x)
5024 enum rtx_code code = GET_CODE (x);
5025 enum machine_mode mode = GET_MODE (x);
5027 switch (GET_RTX_CLASS (code))
5029 case RTX_UNARY:
5030 return simplify_unary_operation (code, mode,
5031 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5032 case RTX_COMM_ARITH:
5033 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5034 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5036 /* Fall through.... */
5038 case RTX_BIN_ARITH:
5039 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5041 case RTX_TERNARY:
5042 case RTX_BITFIELD_OPS:
5043 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5044 XEXP (x, 0), XEXP (x, 1),
5045 XEXP (x, 2));
5047 case RTX_COMPARE:
5048 case RTX_COMM_COMPARE:
5049 return simplify_relational_operation (code, mode,
5050 ((GET_MODE (XEXP (x, 0))
5051 != VOIDmode)
5052 ? GET_MODE (XEXP (x, 0))
5053 : GET_MODE (XEXP (x, 1))),
5054 XEXP (x, 0),
5055 XEXP (x, 1));
5057 case RTX_EXTRA:
5058 if (code == SUBREG)
5059 return simplify_subreg (mode, SUBREG_REG (x),
5060 GET_MODE (SUBREG_REG (x)),
5061 SUBREG_BYTE (x));
5062 break;
5064 case RTX_OBJ:
5065 if (code == LO_SUM)
5067 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5068 if (GET_CODE (XEXP (x, 0)) == HIGH
5069 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5070 return XEXP (x, 1);
5072 break;
5074 default:
5075 break;
5077 return NULL;