1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
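/* Editorial note (not in the original source): HWI_SIGN_EXTEND simply
   replicates the sign bit of LOW across an entire HOST_WIDE_INT, giving
   the high half of a (low, high) pair when LOW is reinterpreted as a
   signed value.  A minimal host-side sketch, assuming a two's-complement
   HOST_WIDE_INT:

     HOST_WIDE_INT low  = -5;                      // sign bit set
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   // == -1 (all ones)
     low  = 5;
     high = HWI_SIGN_EXTEND (low);                 // == 0

   so the pair (low, high) then represents the same signed value in
   double-width arithmetic.  */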
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
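/* Illustrative example (editorial, not part of the original file): in
   QImode the most negative value is -128; negating it yields +128,
   which does not fit, and gen_int_mode truncates the result back into
   the mode:

     neg_const_int (QImode, GEN_INT (-128))   --> (const_int -128)
     neg_const_int (QImode, GEN_INT (100))    --> (const_int -100)

   i.e. the wrap-around of the two's-complement minimum is deliberate.  */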
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
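/* Illustrative example (editorial): for 32-bit SImode the only value
   accepted is the constant whose sole set bit, within the mode, is
   bit 31:

     mode_signbit_p (SImode, GEN_INT ((HOST_WIDE_INT) 1 << 31))  --> true
     mode_signbit_p (SImode, GEN_INT (1))                        --> false

   Modes wider than a host word go through the CONST_DOUBLE arm above,
   provided the low word of the constant is zero.  */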
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
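/* Usage sketch (editorial): callers need not pre-check for foldable
   operands.  Assuming REG is some SImode register rtx:

     simplify_gen_binary (PLUS, SImode, reg, const0_rtx)
       --> reg                          (folded by simplify_binary_operation)
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg)
       --> (plus:SI reg (const_int 2))  (constant canonicalized second)
*/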
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
134 avoid_constant_pool_reference (rtx x)
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
142 case MEM:
143 break;
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
151 REAL_VALUE_TYPE d;
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
156 return x;
158 default:
159 return x;
162 if (GET_MODE (x) == BLKmode)
163 return x;
165 addr = XEXP (x, 0);
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
199 else
200 return c;
203 return x;
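/* Illustrative example (editorial): if X is a DFmode MEM whose address
   is a SYMBOL_REF into the constant pool holding 1.0, the CONST_DOUBLE
   for 1.0 is returned directly so that later folding can see the value;
   any MEM that is not a constant-pool reference comes back unchanged.  */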
206 /* Return true if X is a MEM referencing the constant pool. */
208 bool
209 constant_pool_reference_p (rtx x)
211 return avoid_constant_pool_reference (x) != x;
214 /* Make a unary operation by first seeing if it folds and otherwise making
215 the specified operation. */
218 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
219 enum machine_mode op_mode)
221 rtx tem;
223 /* If this simplifies, use it. */
224 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
225 return tem;
227 return gen_rtx_fmt_e (code, mode, op);
230 /* Likewise for ternary operations. */
233 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
234 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
236 rtx tem;
238 /* If this simplifies, use it. */
239 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
240 op0, op1, op2)))
241 return tem;
243 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
246 /* Likewise, for relational operations.
247 CMP_MODE specifies mode comparison is done in. */
250 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
251 enum machine_mode cmp_mode, rtx op0, rtx op1)
253 rtx tem;
255 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
256 op0, op1)))
257 return tem;
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
266 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
271 rtx op0, op1, op2;
273 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
277 if (x == old_rtx)
278 return new_rtx;
280 switch (GET_RTX_CLASS (code))
282 case RTX_UNARY:
283 op0 = XEXP (x, 0);
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0))
287 return x;
288 return simplify_gen_unary (code, mode, op0, op_mode);
290 case RTX_BIN_ARITH:
291 case RTX_COMM_ARITH:
292 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
295 return x;
296 return simplify_gen_binary (code, mode, op0, op1);
298 case RTX_COMPARE:
299 case RTX_COMM_COMPARE:
300 op0 = XEXP (x, 0);
301 op1 = XEXP (x, 1);
302 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
303 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
304 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
305 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
306 return x;
307 return simplify_gen_relational (code, mode, op_mode, op0, op1);
309 case RTX_TERNARY:
310 case RTX_BITFIELD_OPS:
311 op0 = XEXP (x, 0);
312 op_mode = GET_MODE (op0);
313 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
314 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
315 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
316 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
317 return x;
318 if (op_mode == VOIDmode)
319 op_mode = GET_MODE (op0);
320 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
322 case RTX_EXTRA:
323 /* The only case we try to handle is a SUBREG. */
324 if (code == SUBREG)
326 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
327 if (op0 == SUBREG_REG (x))
328 return x;
329 op0 = simplify_gen_subreg (GET_MODE (x), op0,
330 GET_MODE (SUBREG_REG (x)),
331 SUBREG_BYTE (x));
332 return op0 ? op0 : x;
334 break;
336 case RTX_OBJ:
337 if (code == MEM)
339 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
340 if (op0 == XEXP (x, 0))
341 return x;
342 return replace_equiv_address_nv (x, op0);
344 else if (code == LO_SUM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
347 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
349 /* (lo_sum (high x) x) -> x */
350 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
351 return op1;
353 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
354 return x;
355 return gen_rtx_LO_SUM (mode, op0, op1);
357 else if (code == REG)
359 if (rtx_equal_p (x, old_rtx))
360 return new_rtx;
362 break;
364 default:
365 break;
367 return x;
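/* Usage sketch (editorial): substitution and folding happen in one pass.
   With X = (plus:SI (reg:SI 60) (const_int 4)) and reg60 standing for
   that (reg:SI 60) rtx,

     simplify_replace_rtx (x, reg60, GEN_INT (1))   --> (const_int 5)

   because the rebuilt PLUS of two constants is folded through
   simplify_gen_binary (the constant arithmetic itself lives in
   simplify_const_binary_operation).  If nothing inside X matches
   OLD_RTX, X is returned unchanged rather than copied.  */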
370 /* Try to simplify a unary operation CODE whose output mode is to be
371 MODE with input operand OP whose mode was originally OP_MODE.
372 Return zero if no simplification can be made. */
374 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
375 rtx op, enum machine_mode op_mode)
377 rtx trueop, tem;
379 if (GET_CODE (op) == CONST)
380 op = XEXP (op, 0);
382 trueop = avoid_constant_pool_reference (op);
384 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
385 if (tem)
386 return tem;
388 return simplify_unary_operation_1 (code, mode, op);
391 /* Perform some simplifications we can do even if the operands
392 aren't constant. */
393 static rtx
394 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
396 enum rtx_code reversed;
397 rtx temp;
399 switch (code)
401 case NOT:
402 /* (not (not X)) == X. */
403 if (GET_CODE (op) == NOT)
404 return XEXP (op, 0);
406 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
407 comparison is all ones. */
408 if (COMPARISON_P (op)
409 && (mode == BImode || STORE_FLAG_VALUE == -1)
410 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
411 return simplify_gen_relational (reversed, mode, VOIDmode,
412 XEXP (op, 0), XEXP (op, 1));
414 /* (not (plus X -1)) can become (neg X). */
415 if (GET_CODE (op) == PLUS
416 && XEXP (op, 1) == constm1_rtx)
417 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
419 /* Similarly, (not (neg X)) is (plus X -1). */
420 if (GET_CODE (op) == NEG)
421 return plus_constant (XEXP (op, 0), -1);
423 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
424 if (GET_CODE (op) == XOR
425 && GET_CODE (XEXP (op, 1)) == CONST_INT
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
431 if (GET_CODE (op) == PLUS
432 && GET_CODE (XEXP (op, 1)) == CONST_INT
433 && mode_signbit_p (mode, XEXP (op, 1))
434 && (temp = simplify_unary_operation (NOT, mode,
435 XEXP (op, 1), mode)) != 0)
436 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
439 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
440 operands other than 1, but that is not valid. We could do a
441 similar simplification for (not (lshiftrt C X)) where C is
442 just the sign bit, but this doesn't seem common enough to
443 bother with. */
444 if (GET_CODE (op) == ASHIFT
445 && XEXP (op, 0) == const1_rtx)
447 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
448 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
451 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
452 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
453 so we can perform the above simplification. */
455 if (STORE_FLAG_VALUE == -1
456 && GET_CODE (op) == ASHIFTRT
457 && GET_CODE (XEXP (op, 1)) == CONST_INT
458 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
459 return simplify_gen_relational (GE, mode, VOIDmode,
460 XEXP (op, 0), const0_rtx);
463 if (GET_CODE (op) == SUBREG
464 && subreg_lowpart_p (op)
465 && (GET_MODE_SIZE (GET_MODE (op))
466 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
467 && GET_CODE (SUBREG_REG (op)) == ASHIFT
468 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
470 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
471 rtx x;
473 x = gen_rtx_ROTATE (inner_mode,
474 simplify_gen_unary (NOT, inner_mode, const1_rtx,
475 inner_mode),
476 XEXP (SUBREG_REG (op), 1));
477 return rtl_hooks.gen_lowpart_no_emit (mode, x);
480 /* Apply De Morgan's laws to reduce number of patterns for machines
481 with negating logical insns (and-not, nand, etc.). If result has
482 only one NOT, put it first, since that is how the patterns are
483 coded. */
485 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
487 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
488 enum machine_mode op_mode;
490 op_mode = GET_MODE (in1);
491 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
493 op_mode = GET_MODE (in2);
494 if (op_mode == VOIDmode)
495 op_mode = mode;
496 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
498 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
500 rtx tem = in2;
501 in2 = in1; in1 = tem;
504 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
505 mode, in1, in2);
507 break;
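/* Worked examples (editorial) of the De Morgan rewrite above:

     (not (ior X Y))          --> (and (not X) (not Y))
     (not (and (not A) B))    --> (ior (not B) A)

   In the second case the inner (not (not A)) folds back to A, and the
   remaining NOT is placed first to match how machine patterns are
   usually written.  */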
509 case NEG:
510 /* (neg (neg X)) == X. */
511 if (GET_CODE (op) == NEG)
512 return XEXP (op, 0);
514 /* (neg (plus X 1)) can become (not X). */
515 if (GET_CODE (op) == PLUS
516 && XEXP (op, 1) == const1_rtx)
517 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
519 /* Similarly, (neg (not X)) is (plus X 1). */
520 if (GET_CODE (op) == NOT)
521 return plus_constant (XEXP (op, 0), 1);
523 /* (neg (minus X Y)) can become (minus Y X). This transformation
524 isn't safe for modes with signed zeros, since if X and Y are
525 both +0, (minus Y X) is the same as (minus X Y). If the
526 rounding mode is towards +infinity (or -infinity) then the two
527 expressions will be rounded differently. */
528 if (GET_CODE (op) == MINUS
529 && !HONOR_SIGNED_ZEROS (mode)
530 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
531 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
533 if (GET_CODE (op) == PLUS
534 && !HONOR_SIGNED_ZEROS (mode)
535 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
537 /* (neg (plus A C)) is simplified to (minus -C A). */
538 if (GET_CODE (XEXP (op, 1)) == CONST_INT
539 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
541 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
542 if (temp)
543 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
546 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
551 /* (neg (mult A B)) becomes (mult (neg A) B).
552 This works even for floating-point values. */
553 if (GET_CODE (op) == MULT
554 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
556 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
560 /* NEG commutes with ASHIFT since it is multiplication. Only do
561 this if we can then eliminate the NEG (e.g., if the operand
562 is a constant). */
563 if (GET_CODE (op) == ASHIFT)
565 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
566 if (temp)
567 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
570 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == ASHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (LSHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
578 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
579 C is equal to the width of MODE minus 1. */
580 if (GET_CODE (op) == LSHIFTRT
581 && GET_CODE (XEXP (op, 1)) == CONST_INT
582 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
583 return simplify_gen_binary (ASHIFTRT, mode,
584 XEXP (op, 0), XEXP (op, 1));
586 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
587 if (GET_CODE (op) == XOR
588 && XEXP (op, 1) == const1_rtx
589 && nonzero_bits (XEXP (op, 0), mode) == 1)
590 return plus_constant (XEXP (op, 0), -1);
592 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
593 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
594 if (GET_CODE (op) == LT
595 && XEXP (op, 1) == const0_rtx)
597 enum machine_mode inner = GET_MODE (XEXP (op, 0));
598 int isize = GET_MODE_BITSIZE (inner);
599 if (STORE_FLAG_VALUE == 1)
601 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
602 GEN_INT (isize - 1));
603 if (mode == inner)
604 return temp;
605 if (GET_MODE_BITSIZE (mode) > isize)
606 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
607 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
609 else if (STORE_FLAG_VALUE == -1)
611 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
612 GEN_INT (isize - 1));
613 if (mode == inner)
614 return temp;
615 if (GET_MODE_BITSIZE (mode) > isize)
616 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
617 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
620 break;
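/* Worked examples (editorial) for the NEG case, assuming SImode
   operands and STORE_FLAG_VALUE == 1:

     (neg (minus X Y))              --> (minus Y X)      (integer modes)
     (neg (lt:SI X (const_int 0)))  --> (ashiftrt:SI X (const_int 31))

   the latter because -(X < 0) is 0 or -1, exactly the value obtained by
   arithmetically shifting the sign bit across the word.  */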
622 case TRUNCATE:
623 /* We can't handle truncation to a partial integer mode here
624 because we don't know the real bitsize of the partial
625 integer mode. */
626 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
627 break;
629 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
630 if ((GET_CODE (op) == SIGN_EXTEND
631 || GET_CODE (op) == ZERO_EXTEND)
632 && GET_MODE (XEXP (op, 0)) == mode)
633 return XEXP (op, 0);
635 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
636 (OP:SI foo:SI) if OP is NEG or ABS. */
637 if ((GET_CODE (op) == ABS
638 || GET_CODE (op) == NEG)
639 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
640 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
641 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
642 return simplify_gen_unary (GET_CODE (op), mode,
643 XEXP (XEXP (op, 0), 0), mode);
645 /* (truncate:A (subreg:B (truncate:C X) 0)) is
646 (truncate:A X). */
647 if (GET_CODE (op) == SUBREG
648 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
649 && subreg_lowpart_p (op))
650 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
651 GET_MODE (XEXP (SUBREG_REG (op), 0)));
653 /* If we know that the value is already truncated, we can
654 replace the TRUNCATE with a SUBREG. Note that this is also
655 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
656 modes; we just have to apply a different definition for
657 truncation. But don't do this for an (LSHIFTRT (MULT ...))
658 since this will cause problems with the umulXi3_highpart
659 patterns. */
660 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
661 GET_MODE_BITSIZE (GET_MODE (op)))
662 ? (num_sign_bit_copies (op, GET_MODE (op))
663 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
664 - GET_MODE_BITSIZE (mode)))
665 : truncated_to_mode (mode, op))
666 && ! (GET_CODE (op) == LSHIFTRT
667 && GET_CODE (XEXP (op, 0)) == MULT))
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 /* A truncate of a comparison can be replaced with a subreg if
671 STORE_FLAG_VALUE permits. This is like the previous test,
672 but it works even if the comparison is done in a mode larger
673 than HOST_BITS_PER_WIDE_INT. */
674 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
675 && COMPARISON_P (op)
676 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
677 return rtl_hooks.gen_lowpart_no_emit (mode, op);
678 break;
680 case FLOAT_TRUNCATE:
681 if (DECIMAL_FLOAT_MODE_P (mode))
682 break;
684 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
685 if (GET_CODE (op) == FLOAT_EXTEND
686 && GET_MODE (XEXP (op, 0)) == mode)
687 return XEXP (op, 0);
689 /* (float_truncate:SF (float_truncate:DF foo:XF))
690 = (float_truncate:SF foo:XF).
691 This may eliminate double rounding, so it is unsafe.
693 (float_truncate:SF (float_extend:XF foo:DF))
694 = (float_truncate:SF foo:DF).
696 (float_truncate:DF (float_extend:XF foo:SF))
697 = (float_extend:DF foo:SF). */
698 if ((GET_CODE (op) == FLOAT_TRUNCATE
699 && flag_unsafe_math_optimizations)
700 || GET_CODE (op) == FLOAT_EXTEND)
701 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
702 0)))
703 > GET_MODE_SIZE (mode)
704 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
705 mode,
706 XEXP (op, 0), mode);
708 /* (float_truncate (float x)) is (float x) */
709 if (GET_CODE (op) == FLOAT
710 && (flag_unsafe_math_optimizations
711 || ((unsigned)significand_size (GET_MODE (op))
712 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
713 - num_sign_bit_copies (XEXP (op, 0),
714 GET_MODE (XEXP (op, 0)))))))
715 return simplify_gen_unary (FLOAT, mode,
716 XEXP (op, 0),
717 GET_MODE (XEXP (op, 0)));
719 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
720 (OP:SF foo:SF) if OP is NEG or ABS. */
721 if ((GET_CODE (op) == ABS
722 || GET_CODE (op) == NEG)
723 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
725 return simplify_gen_unary (GET_CODE (op), mode,
726 XEXP (XEXP (op, 0), 0), mode);
728 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
729 is (float_truncate:SF x). */
730 if (GET_CODE (op) == SUBREG
731 && subreg_lowpart_p (op)
732 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
733 return SUBREG_REG (op);
734 break;
736 case FLOAT_EXTEND:
737 if (DECIMAL_FLOAT_MODE_P (mode))
738 break;
740 /* (float_extend (float_extend x)) is (float_extend x)
742 (float_extend (float x)) is (float x) assuming that double
743 rounding can't happen. */
745 if (GET_CODE (op) == FLOAT_EXTEND
746 || (GET_CODE (op) == FLOAT
747 && ((unsigned)significand_size (GET_MODE (op))
748 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
749 - num_sign_bit_copies (XEXP (op, 0),
750 GET_MODE (XEXP (op, 0)))))))
751 return simplify_gen_unary (GET_CODE (op), mode,
752 XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
755 break;
757 case ABS:
758 /* (abs (neg <foo>)) -> (abs <foo>) */
759 if (GET_CODE (op) == NEG)
760 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
761 GET_MODE (XEXP (op, 0)));
763 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
764 do nothing. */
765 if (GET_MODE (op) == VOIDmode)
766 break;
768 /* If operand is something known to be positive, ignore the ABS. */
769 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
770 || ((GET_MODE_BITSIZE (GET_MODE (op))
771 <= HOST_BITS_PER_WIDE_INT)
772 && ((nonzero_bits (op, GET_MODE (op))
773 & ((HOST_WIDE_INT) 1
774 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
775 == 0)))
776 return op;
778 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
779 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
780 return gen_rtx_NEG (mode, op);
782 break;
784 case FFS:
785 /* (ffs (*_extend <X>)) = (ffs <X>) */
786 if (GET_CODE (op) == SIGN_EXTEND
787 || GET_CODE (op) == ZERO_EXTEND)
788 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
789 GET_MODE (XEXP (op, 0)));
790 break;
792 case POPCOUNT:
793 switch (GET_CODE (op))
795 case BSWAP:
796 case ZERO_EXTEND:
797 /* (popcount (zero_extend <X>)) = (popcount <X>) */
798 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
799 GET_MODE (XEXP (op, 0)));
801 case ROTATE:
802 case ROTATERT:
803 /* Rotations don't affect popcount. */
804 if (!side_effects_p (XEXP (op, 1)))
805 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
806 GET_MODE (XEXP (op, 0)));
807 break;
809 default:
810 break;
812 break;
814 case PARITY:
815 switch (GET_CODE (op))
817 case NOT:
818 case BSWAP:
819 case ZERO_EXTEND:
820 case SIGN_EXTEND:
821 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 case ROTATE:
825 case ROTATERT:
826 /* Rotations don't affect parity. */
827 if (!side_effects_p (XEXP (op, 1)))
828 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
829 GET_MODE (XEXP (op, 0)));
830 break;
832 default:
833 break;
835 break;
837 case BSWAP:
838 /* (bswap (bswap x)) -> x. */
839 if (GET_CODE (op) == BSWAP)
840 return XEXP (op, 0);
841 break;
843 case FLOAT:
844 /* (float (sign_extend <X>)) = (float <X>). */
845 if (GET_CODE (op) == SIGN_EXTEND)
846 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
847 GET_MODE (XEXP (op, 0)));
848 break;
850 case SIGN_EXTEND:
851 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
852 becomes just the MINUS if its mode is MODE. This allows
853 folding switch statements on machines using casesi (such as
854 the VAX). */
855 if (GET_CODE (op) == TRUNCATE
856 && GET_MODE (XEXP (op, 0)) == mode
857 && GET_CODE (XEXP (op, 0)) == MINUS
858 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
859 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
860 return XEXP (op, 0);
862 /* Check for a sign extension of a subreg of a promoted
863 variable, where the promotion is sign-extended, and the
864 target mode is the same as the variable's promotion. */
865 if (GET_CODE (op) == SUBREG
866 && SUBREG_PROMOTED_VAR_P (op)
867 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
868 && GET_MODE (XEXP (op, 0)) == mode)
869 return XEXP (op, 0);
871 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
872 if (! POINTERS_EXTEND_UNSIGNED
873 && mode == Pmode && GET_MODE (op) == ptr_mode
874 && (CONSTANT_P (op)
875 || (GET_CODE (op) == SUBREG
876 && REG_P (SUBREG_REG (op))
877 && REG_POINTER (SUBREG_REG (op))
878 && GET_MODE (SUBREG_REG (op)) == Pmode)))
879 return convert_memory_address (Pmode, op);
880 #endif
881 break;
883 case ZERO_EXTEND:
884 /* Check for a zero extension of a subreg of a promoted
885 variable, where the promotion is zero-extended, and the
886 target mode is the same as the variable's promotion. */
887 if (GET_CODE (op) == SUBREG
888 && SUBREG_PROMOTED_VAR_P (op)
889 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
890 && GET_MODE (XEXP (op, 0)) == mode)
891 return XEXP (op, 0);
893 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
894 if (POINTERS_EXTEND_UNSIGNED > 0
895 && mode == Pmode && GET_MODE (op) == ptr_mode
896 && (CONSTANT_P (op)
897 || (GET_CODE (op) == SUBREG
898 && REG_P (SUBREG_REG (op))
899 && REG_POINTER (SUBREG_REG (op))
900 && GET_MODE (SUBREG_REG (op)) == Pmode)))
901 return convert_memory_address (Pmode, op);
902 #endif
903 break;
905 default:
906 break;
909 return 0;
912 /* Try to compute the value of a unary operation CODE whose output mode is to
913 be MODE with input operand OP whose mode was originally OP_MODE.
914 Return zero if the value cannot be computed. */
916 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
917 rtx op, enum machine_mode op_mode)
919 unsigned int width = GET_MODE_BITSIZE (mode);
921 if (code == VEC_DUPLICATE)
923 gcc_assert (VECTOR_MODE_P (mode));
924 if (GET_MODE (op) != VOIDmode)
926 if (!VECTOR_MODE_P (GET_MODE (op)))
927 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
928 else
929 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
930 (GET_MODE (op)));
932 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
933 || GET_CODE (op) == CONST_VECTOR)
935 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
936 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
937 rtvec v = rtvec_alloc (n_elts);
938 unsigned int i;
940 if (GET_CODE (op) != CONST_VECTOR)
941 for (i = 0; i < n_elts; i++)
942 RTVEC_ELT (v, i) = op;
943 else
945 enum machine_mode inmode = GET_MODE (op);
946 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
947 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
949 gcc_assert (in_n_elts < n_elts);
950 gcc_assert ((n_elts % in_n_elts) == 0);
951 for (i = 0; i < n_elts; i++)
952 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
954 return gen_rtx_CONST_VECTOR (mode, v);
958 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
960 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
961 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
962 enum machine_mode opmode = GET_MODE (op);
963 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
964 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
965 rtvec v = rtvec_alloc (n_elts);
966 unsigned int i;
968 gcc_assert (op_n_elts == n_elts);
969 for (i = 0; i < n_elts; i++)
971 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
972 CONST_VECTOR_ELT (op, i),
973 GET_MODE_INNER (opmode));
974 if (!x)
975 return 0;
976 RTVEC_ELT (v, i) = x;
978 return gen_rtx_CONST_VECTOR (mode, v);
981 /* The order of these tests is critical so that, for example, we don't
982 check the wrong mode (input vs. output) for a conversion operation,
983 such as FIX. At some point, this should be simplified. */
985 if (code == FLOAT && GET_MODE (op) == VOIDmode
986 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
988 HOST_WIDE_INT hv, lv;
989 REAL_VALUE_TYPE d;
991 if (GET_CODE (op) == CONST_INT)
992 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
993 else
994 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
996 REAL_VALUE_FROM_INT (d, lv, hv, mode);
997 d = real_value_truncate (mode, d);
998 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1000 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1001 && (GET_CODE (op) == CONST_DOUBLE
1002 || GET_CODE (op) == CONST_INT))
1004 HOST_WIDE_INT hv, lv;
1005 REAL_VALUE_TYPE d;
1007 if (GET_CODE (op) == CONST_INT)
1008 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1009 else
1010 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1012 if (op_mode == VOIDmode)
1014 /* We don't know how to interpret negative-looking numbers in
1015 this case, so don't try to fold those. */
1016 if (hv < 0)
1017 return 0;
1019 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1021 else
1022 hv = 0, lv &= GET_MODE_MASK (op_mode);
1024 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1025 d = real_value_truncate (mode, d);
1026 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1029 if (GET_CODE (op) == CONST_INT
1030 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1032 HOST_WIDE_INT arg0 = INTVAL (op);
1033 HOST_WIDE_INT val;
1035 switch (code)
1037 case NOT:
1038 val = ~ arg0;
1039 break;
1041 case NEG:
1042 val = - arg0;
1043 break;
1045 case ABS:
1046 val = (arg0 >= 0 ? arg0 : - arg0);
1047 break;
1049 case FFS:
1050 /* Don't use ffs here. Instead, get low order bit and then its
1051 number. If arg0 is zero, this will return 0, as desired. */
1052 arg0 &= GET_MODE_MASK (mode);
1053 val = exact_log2 (arg0 & (- arg0)) + 1;
1054 break;
1056 case CLZ:
1057 arg0 &= GET_MODE_MASK (mode);
1058 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1060 else
1061 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1062 break;
1064 case CTZ:
1065 arg0 &= GET_MODE_MASK (mode);
1066 if (arg0 == 0)
1068 /* Even if the value at zero is undefined, we have to come
1069 up with some replacement. Seems good enough. */
1070 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1071 val = GET_MODE_BITSIZE (mode);
1073 else
1074 val = exact_log2 (arg0 & -arg0);
1075 break;
1077 case POPCOUNT:
1078 arg0 &= GET_MODE_MASK (mode);
1079 val = 0;
1080 while (arg0)
1081 val++, arg0 &= arg0 - 1;
1082 break;
1084 case PARITY:
1085 arg0 &= GET_MODE_MASK (mode);
1086 val = 0;
1087 while (arg0)
1088 val++, arg0 &= arg0 - 1;
1089 val &= 1;
1090 break;
1092 case BSWAP:
1094 unsigned int s;
1096 val = 0;
1097 for (s = 0; s < width; s += 8)
1099 unsigned int d = width - s - 8;
1100 unsigned HOST_WIDE_INT byte;
1101 byte = (arg0 >> s) & 0xff;
1102 val |= byte << d;
1105 break;
1107 case TRUNCATE:
1108 val = arg0;
1109 break;
1111 case ZERO_EXTEND:
1112 /* When zero-extending a CONST_INT, we need to know its
1113 original mode. */
1114 gcc_assert (op_mode != VOIDmode);
1115 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1117 /* If we were really extending the mode,
1118 we would have to distinguish between zero-extension
1119 and sign-extension. */
1120 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1121 val = arg0;
1123 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1124 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1125 else
1126 return 0;
1127 break;
1129 case SIGN_EXTEND:
1130 if (op_mode == VOIDmode)
1131 op_mode = mode;
1132 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1134 /* If we were really extending the mode,
1135 we would have to distinguish between zero-extension
1136 and sign-extension. */
1137 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1138 val = arg0;
1140 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1143 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1144 if (val
1145 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1146 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1148 else
1149 return 0;
1150 break;
1152 case SQRT:
1153 case FLOAT_EXTEND:
1154 case FLOAT_TRUNCATE:
1155 case SS_TRUNCATE:
1156 case US_TRUNCATE:
1157 case SS_NEG:
1158 return 0;
1160 default:
1161 gcc_unreachable ();
1164 return gen_int_mode (val, mode);
1167 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1168 for a DImode operation on a CONST_INT. */
1169 else if (GET_MODE (op) == VOIDmode
1170 && width <= HOST_BITS_PER_WIDE_INT * 2
1171 && (GET_CODE (op) == CONST_DOUBLE
1172 || GET_CODE (op) == CONST_INT))
1174 unsigned HOST_WIDE_INT l1, lv;
1175 HOST_WIDE_INT h1, hv;
1177 if (GET_CODE (op) == CONST_DOUBLE)
1178 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1179 else
1180 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1182 switch (code)
1184 case NOT:
1185 lv = ~ l1;
1186 hv = ~ h1;
1187 break;
1189 case NEG:
1190 neg_double (l1, h1, &lv, &hv);
1191 break;
1193 case ABS:
1194 if (h1 < 0)
1195 neg_double (l1, h1, &lv, &hv);
1196 else
1197 lv = l1, hv = h1;
1198 break;
1200 case FFS:
1201 hv = 0;
1202 if (l1 == 0)
1204 if (h1 == 0)
1205 lv = 0;
1206 else
1207 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1209 else
1210 lv = exact_log2 (l1 & -l1) + 1;
1211 break;
1213 case CLZ:
1214 hv = 0;
1215 if (h1 != 0)
1216 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1217 - HOST_BITS_PER_WIDE_INT;
1218 else if (l1 != 0)
1219 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1220 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1221 lv = GET_MODE_BITSIZE (mode);
1222 break;
1224 case CTZ:
1225 hv = 0;
1226 if (l1 != 0)
1227 lv = exact_log2 (l1 & -l1);
1228 else if (h1 != 0)
1229 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1230 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1231 lv = GET_MODE_BITSIZE (mode);
1232 break;
1234 case POPCOUNT:
1235 hv = 0;
1236 lv = 0;
1237 while (l1)
1238 lv++, l1 &= l1 - 1;
1239 while (h1)
1240 lv++, h1 &= h1 - 1;
1241 break;
1243 case PARITY:
1244 hv = 0;
1245 lv = 0;
1246 while (l1)
1247 lv++, l1 &= l1 - 1;
1248 while (h1)
1249 lv++, h1 &= h1 - 1;
1250 lv &= 1;
1251 break;
1253 case BSWAP:
1255 unsigned int s;
1257 hv = 0;
1258 lv = 0;
1259 for (s = 0; s < width; s += 8)
1261 unsigned int d = width - s - 8;
1262 unsigned HOST_WIDE_INT byte;
1264 if (s < HOST_BITS_PER_WIDE_INT)
1265 byte = (l1 >> s) & 0xff;
1266 else
1267 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1269 if (d < HOST_BITS_PER_WIDE_INT)
1270 lv |= byte << d;
1271 else
1272 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1275 break;
1277 case TRUNCATE:
1278 /* This is just a change-of-mode, so do nothing. */
1279 lv = l1, hv = h1;
1280 break;
1282 case ZERO_EXTEND:
1283 gcc_assert (op_mode != VOIDmode);
1285 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1286 return 0;
1288 hv = 0;
1289 lv = l1 & GET_MODE_MASK (op_mode);
1290 break;
1292 case SIGN_EXTEND:
1293 if (op_mode == VOIDmode
1294 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1295 return 0;
1296 else
1298 lv = l1 & GET_MODE_MASK (op_mode);
1299 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1300 && (lv & ((HOST_WIDE_INT) 1
1301 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1302 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1304 hv = HWI_SIGN_EXTEND (lv);
1306 break;
1308 case SQRT:
1309 return 0;
1311 default:
1312 return 0;
1315 return immed_double_const (lv, hv, mode);
1318 else if (GET_CODE (op) == CONST_DOUBLE
1319 && SCALAR_FLOAT_MODE_P (mode))
1321 REAL_VALUE_TYPE d, t;
1322 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1324 switch (code)
1326 case SQRT:
1327 if (HONOR_SNANS (mode) && real_isnan (&d))
1328 return 0;
1329 real_sqrt (&t, mode, &d);
1330 d = t;
1331 break;
1332 case ABS:
1333 d = REAL_VALUE_ABS (d);
1334 break;
1335 case NEG:
1336 d = REAL_VALUE_NEGATE (d);
1337 break;
1338 case FLOAT_TRUNCATE:
1339 d = real_value_truncate (mode, d);
1340 break;
1341 case FLOAT_EXTEND:
1342 /* All this does is change the mode. */
1343 break;
1344 case FIX:
1345 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1346 break;
1347 case NOT:
1349 long tmp[4];
1350 int i;
1352 real_to_target (tmp, &d, GET_MODE (op));
1353 for (i = 0; i < 4; i++)
1354 tmp[i] = ~tmp[i];
1355 real_from_target (&d, tmp, mode);
1356 break;
1358 default:
1359 gcc_unreachable ();
1361 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1364 else if (GET_CODE (op) == CONST_DOUBLE
1365 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1366 && GET_MODE_CLASS (mode) == MODE_INT
1367 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1369 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1370 operators are intentionally left unspecified (to ease implementation
1371 by target backends), for consistency, this routine implements the
1372 same semantics for constant folding as used by the middle-end. */
1374 /* This was formerly used only for non-IEEE float.
1375 eggert@twinsun.com says it is safe for IEEE also. */
1376 HOST_WIDE_INT xh, xl, th, tl;
1377 REAL_VALUE_TYPE x, t;
1378 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1379 switch (code)
1381 case FIX:
1382 if (REAL_VALUE_ISNAN (x))
1383 return const0_rtx;
1385 /* Test against the signed upper bound. */
1386 if (width > HOST_BITS_PER_WIDE_INT)
1388 th = ((unsigned HOST_WIDE_INT) 1
1389 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1390 tl = -1;
1392 else
1394 th = 0;
1395 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1397 real_from_integer (&t, VOIDmode, tl, th, 0);
1398 if (REAL_VALUES_LESS (t, x))
1400 xh = th;
1401 xl = tl;
1402 break;
1405 /* Test against the signed lower bound. */
1406 if (width > HOST_BITS_PER_WIDE_INT)
1408 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1409 tl = 0;
1411 else
1413 th = -1;
1414 tl = (HOST_WIDE_INT) -1 << (width - 1);
1416 real_from_integer (&t, VOIDmode, tl, th, 0);
1417 if (REAL_VALUES_LESS (x, t))
1419 xh = th;
1420 xl = tl;
1421 break;
1423 REAL_VALUE_TO_INT (&xl, &xh, x);
1424 break;
1426 case UNSIGNED_FIX:
1427 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1428 return const0_rtx;
1430 /* Test against the unsigned upper bound. */
1431 if (width == 2*HOST_BITS_PER_WIDE_INT)
1433 th = -1;
1434 tl = -1;
1436 else if (width >= HOST_BITS_PER_WIDE_INT)
1438 th = ((unsigned HOST_WIDE_INT) 1
1439 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1440 tl = -1;
1442 else
1444 th = 0;
1445 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1447 real_from_integer (&t, VOIDmode, tl, th, 1);
1448 if (REAL_VALUES_LESS (t, x))
1450 xh = th;
1451 xl = tl;
1452 break;
1455 REAL_VALUE_TO_INT (&xl, &xh, x);
1456 break;
1458 default:
1459 gcc_unreachable ();
1461 return immed_double_const (xl, xh, mode);
1464 return NULL_RTX;
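/* Illustrative results (editorial) of the constant folding above:

     simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode)
       --> (const_int -5)
     simplify_const_unary_operation (ZERO_EXTEND, SImode, GEN_INT (-1), QImode)
       --> (const_int 255)
     simplify_const_unary_operation (PARITY, SImode, GEN_INT (7), SImode)
       --> (const_int 1)

   Non-constant operands, or combinations the code cannot evaluate,
   simply yield NULL_RTX.  */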
1467 /* Subroutine of simplify_binary_operation to simplify a commutative,
1468 associative binary operation CODE with result mode MODE, operating
1469 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1470 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1471 canonicalization is possible. */
1473 static rtx
1474 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1475 rtx op0, rtx op1)
1477 rtx tem;
1479 /* Linearize the operator to the left. */
1480 if (GET_CODE (op1) == code)
1482 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1483 if (GET_CODE (op0) == code)
1485 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1486 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1489 /* "a op (b op c)" becomes "(b op c) op a". */
1490 if (! swap_commutative_operands_p (op1, op0))
1491 return simplify_gen_binary (code, mode, op1, op0);
1493 tem = op0;
1494 op0 = op1;
1495 op1 = tem;
1498 if (GET_CODE (op0) == code)
1500 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1501 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1503 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1504 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1507 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1508 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1509 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1510 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1511 if (tem != 0)
1512 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1514 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1515 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1516 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1517 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1518 if (tem != 0)
1519 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1522 return 0;
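/* Worked example (editorial): for (plus:SI (plus:SI X (const_int 3))
   (const_int 4)) the "a op (b op c)" attempt folds the two constants,
   so the whole expression canonicalizes to (plus:SI X (const_int 7)).
   When no pairing folds and no swap improves the canonical order, the
   function returns 0 and the caller keeps the original form.  */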
1526 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1527 and OP1. Return 0 if no simplification is possible.
1529 Don't use this for relational operations such as EQ or LT.
1530 Use simplify_relational_operation instead. */
1532 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1533 rtx op0, rtx op1)
1535 rtx trueop0, trueop1;
1536 rtx tem;
1538 /* Relational operations don't work here. We must know the mode
1539 of the operands in order to do the comparison correctly.
1540 Assuming a full word can give incorrect results.
1541 Consider comparing 128 with -128 in QImode. */
1542 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1543 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1545 /* Make sure the constant is second. */
1546 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1547 && swap_commutative_operands_p (op0, op1))
1549 tem = op0, op0 = op1, op1 = tem;
1552 trueop0 = avoid_constant_pool_reference (op0);
1553 trueop1 = avoid_constant_pool_reference (op1);
1555 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1556 if (tem)
1557 return tem;
1558 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1561 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1562 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1563 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1564 actual constants. */
1566 static rtx
1567 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1568 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1570 rtx tem, reversed, opleft, opright;
1571 HOST_WIDE_INT val;
1572 unsigned int width = GET_MODE_BITSIZE (mode);
1574 /* Even if we can't compute a constant result,
1575 there are some cases worth simplifying. */
1577 switch (code)
1579 case PLUS:
1580 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1581 when x is NaN, infinite, or finite and nonzero. They aren't
1582 when x is -0 and the rounding mode is not towards -infinity,
1583 since (-0) + 0 is then 0. */
1584 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1585 return op0;
1587 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1588 transformations are safe even for IEEE. */
1589 if (GET_CODE (op0) == NEG)
1590 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1591 else if (GET_CODE (op1) == NEG)
1592 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1594 /* (~a) + 1 -> -a */
1595 if (INTEGRAL_MODE_P (mode)
1596 && GET_CODE (op0) == NOT
1597 && trueop1 == const1_rtx)
1598 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1600 /* Handle both-operands-constant cases. We can only add
1601 CONST_INTs to constants since the sum of relocatable symbols
1602 can't be handled by most assemblers. Don't add CONST_INT
1603 to CONST_INT since overflow won't be computed properly if wider
1604 than HOST_BITS_PER_WIDE_INT. */
1606 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1607 && GET_CODE (op1) == CONST_INT)
1608 return plus_constant (op0, INTVAL (op1));
1609 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1610 && GET_CODE (op0) == CONST_INT)
1611 return plus_constant (op1, INTVAL (op0));
1613 /* See if this is something like X * C - X or vice versa or
1614 if the multiplication is written as a shift. If so, we can
1615 distribute and make a new multiply, shift, or maybe just
1616 have X (if C is 2 in the example above). But don't make
1617 something more expensive than we had before. */
1619 if (SCALAR_INT_MODE_P (mode))
1621 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1622 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1623 rtx lhs = op0, rhs = op1;
1625 if (GET_CODE (lhs) == NEG)
1627 coeff0l = -1;
1628 coeff0h = -1;
1629 lhs = XEXP (lhs, 0);
1631 else if (GET_CODE (lhs) == MULT
1632 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1634 coeff0l = INTVAL (XEXP (lhs, 1));
1635 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1636 lhs = XEXP (lhs, 0);
1638 else if (GET_CODE (lhs) == ASHIFT
1639 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1640 && INTVAL (XEXP (lhs, 1)) >= 0
1641 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1643 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1644 coeff0h = 0;
1645 lhs = XEXP (lhs, 0);
1648 if (GET_CODE (rhs) == NEG)
1650 coeff1l = -1;
1651 coeff1h = -1;
1652 rhs = XEXP (rhs, 0);
1654 else if (GET_CODE (rhs) == MULT
1655 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1657 coeff1l = INTVAL (XEXP (rhs, 1));
1658 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1659 rhs = XEXP (rhs, 0);
1661 else if (GET_CODE (rhs) == ASHIFT
1662 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1663 && INTVAL (XEXP (rhs, 1)) >= 0
1664 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1666 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1667 coeff1h = 0;
1668 rhs = XEXP (rhs, 0);
1671 if (rtx_equal_p (lhs, rhs))
1673 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1674 rtx coeff;
1675 unsigned HOST_WIDE_INT l;
1676 HOST_WIDE_INT h;
1678 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1679 coeff = immed_double_const (l, h, mode);
1681 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1682 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1683 ? tem : 0;
1687 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1688 if ((GET_CODE (op1) == CONST_INT
1689 || GET_CODE (op1) == CONST_DOUBLE)
1690 && GET_CODE (op0) == XOR
1691 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1692 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1693 && mode_signbit_p (mode, op1))
1694 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1695 simplify_gen_binary (XOR, mode, op1,
1696 XEXP (op0, 1)));
1698 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1699 if (GET_CODE (op0) == MULT
1700 && GET_CODE (XEXP (op0, 0)) == NEG)
1702 rtx in1, in2;
1704 in1 = XEXP (XEXP (op0, 0), 0);
1705 in2 = XEXP (op0, 1);
1706 return simplify_gen_binary (MINUS, mode, op1,
1707 simplify_gen_binary (MULT, mode,
1708 in1, in2));
1711 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1712 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1713 is 1. */
1714 if (COMPARISON_P (op0)
1715 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1716 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1717 && (reversed = reversed_comparison (op0, mode)))
1718 return
1719 simplify_gen_unary (NEG, mode, reversed, mode);
1721 /* If one of the operands is a PLUS or a MINUS, see if we can
1722 simplify this by the associative law.
1723 Don't use the associative law for floating point.
1724 The inaccuracy makes it nonassociative,
1725 and subtle programs can break if operations are associated. */
1727 if (INTEGRAL_MODE_P (mode)
1728 && (plus_minus_operand_p (op0)
1729 || plus_minus_operand_p (op1))
1730 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1731 return tem;
1733 /* Reassociate floating point addition only when the user
1734 specifies unsafe math optimizations. */
1735 if (FLOAT_MODE_P (mode)
1736 && flag_unsafe_math_optimizations)
1738 tem = simplify_associative_operation (code, mode, op0, op1);
1739 if (tem)
1740 return tem;
1742 break;
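/* Worked examples (editorial) of the coefficient-combining code in the
   PLUS case above, with X appearing on both sides:

     (plus:SI (mult:SI X (const_int 4)) X)    --> (mult:SI X (const_int 5))
     (plus:SI (ashift:SI X (const_int 2)) X)  --> (mult:SI X (const_int 5))

   subject to the rtx_cost check, which keeps the original form if the
   combined multiply would be more expensive.  */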
1744 case COMPARE:
1745 #ifdef HAVE_cc0
1746 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1747 using cc0, in which case we want to leave it as a COMPARE
1748 so we can distinguish it from a register-register-copy.
1750 In IEEE floating point, x-0 is not the same as x. */
1752 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1753 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1754 && trueop1 == CONST0_RTX (mode))
1755 return op0;
1756 #endif
1758 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1759 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1760 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1761 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1763 rtx xop00 = XEXP (op0, 0);
1764 rtx xop10 = XEXP (op1, 0);
1766 #ifdef HAVE_cc0
1767 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1768 #else
1769 if (REG_P (xop00) && REG_P (xop10)
1770 && GET_MODE (xop00) == GET_MODE (xop10)
1771 && REGNO (xop00) == REGNO (xop10)
1772 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1773 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1774 #endif
1775 return xop00;
1777 break;
1779 case MINUS:
1780 /* We can't assume x-x is 0 even with non-IEEE floating point,
1781 but since it is zero except in very strange circumstances, we
1782 will treat it as zero with -funsafe-math-optimizations. */
1783 if (rtx_equal_p (trueop0, trueop1)
1784 && ! side_effects_p (op0)
1785 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1786 return CONST0_RTX (mode);
1788 /* Change subtraction from zero into negation. (0 - x) is the
1789 same as -x when x is NaN, infinite, or finite and nonzero.
1790 But if the mode has signed zeros, and does not round towards
1791 -infinity, then 0 - 0 is 0, not -0. */
1792 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1793 return simplify_gen_unary (NEG, mode, op1, mode);
1795 /* (-1 - a) is ~a. */
1796 if (trueop0 == constm1_rtx)
1797 return simplify_gen_unary (NOT, mode, op1, mode);
1799 /* Subtracting 0 has no effect unless the mode has signed zeros
1800 and supports rounding towards -infinity. In such a case,
1801 0 - 0 is -0. */
1802 if (!(HONOR_SIGNED_ZEROS (mode)
1803 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1804 && trueop1 == CONST0_RTX (mode))
1805 return op0;
1807 /* See if this is something like X * C - X or vice versa or
1808 if the multiplication is written as a shift. If so, we can
1809 distribute and make a new multiply, shift, or maybe just
1810 have X (if C is 2 in the example above). But don't make
1811 something more expensive than we had before. */
1813 if (SCALAR_INT_MODE_P (mode))
1815 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1816 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1817 rtx lhs = op0, rhs = op1;
1819 if (GET_CODE (lhs) == NEG)
1821 coeff0l = -1;
1822 coeff0h = -1;
1823 lhs = XEXP (lhs, 0);
1825 else if (GET_CODE (lhs) == MULT
1826 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1828 coeff0l = INTVAL (XEXP (lhs, 1));
1829 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1830 lhs = XEXP (lhs, 0);
1832 else if (GET_CODE (lhs) == ASHIFT
1833 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1834 && INTVAL (XEXP (lhs, 1)) >= 0
1835 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1837 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1838 coeff0h = 0;
1839 lhs = XEXP (lhs, 0);
1842 if (GET_CODE (rhs) == NEG)
1844 negcoeff1l = 1;
1845 negcoeff1h = 0;
1846 rhs = XEXP (rhs, 0);
1848 else if (GET_CODE (rhs) == MULT
1849 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1851 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1852 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1853 rhs = XEXP (rhs, 0);
1855 else if (GET_CODE (rhs) == ASHIFT
1856 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1857 && INTVAL (XEXP (rhs, 1)) >= 0
1858 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1860 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1861 negcoeff1h = -1;
1862 rhs = XEXP (rhs, 0);
1865 if (rtx_equal_p (lhs, rhs))
1867 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1868 rtx coeff;
1869 unsigned HOST_WIDE_INT l;
1870 HOST_WIDE_INT h;
1872 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1873 coeff = immed_double_const (l, h, mode);
1875 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1876 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1877 ? tem : 0;
1881 /* (a - (-b)) -> (a + b). True even for IEEE. */
1882 if (GET_CODE (op1) == NEG)
1883 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1885 /* (-x - c) may be simplified as (-c - x). */
1886 if (GET_CODE (op0) == NEG
1887 && (GET_CODE (op1) == CONST_INT
1888 || GET_CODE (op1) == CONST_DOUBLE))
1890 tem = simplify_unary_operation (NEG, mode, op1, mode);
1891 if (tem)
1892 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1895 /* Don't let a relocatable value get a negative coeff. */
1896 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1897 return simplify_gen_binary (PLUS, mode,
1898 op0,
1899 neg_const_int (mode, op1));
1901 /* (x - (x & y)) -> (x & ~y) */
1902 if (GET_CODE (op1) == AND)
1904 if (rtx_equal_p (op0, XEXP (op1, 0)))
1906 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1907 GET_MODE (XEXP (op1, 1)));
1908 return simplify_gen_binary (AND, mode, op0, tem);
1910 if (rtx_equal_p (op0, XEXP (op1, 1)))
1912 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1913 GET_MODE (XEXP (op1, 0)));
1914 return simplify_gen_binary (AND, mode, op0, tem);
1918 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1919 by reversing the comparison code if valid. */
1920 if (STORE_FLAG_VALUE == 1
1921 && trueop0 == const1_rtx
1922 && COMPARISON_P (op1)
1923 && (reversed = reversed_comparison (op1, mode)))
1924 return reversed;
1926 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1927 if (GET_CODE (op1) == MULT
1928 && GET_CODE (XEXP (op1, 0)) == NEG)
1930 rtx in1, in2;
1932 in1 = XEXP (XEXP (op1, 0), 0);
1933 in2 = XEXP (op1, 1);
1934 return simplify_gen_binary (PLUS, mode,
1935 simplify_gen_binary (MULT, mode,
1936 in1, in2),
1937 op0);
1940 /* Canonicalize (minus (neg A) (mult B C)) to
1941 (minus (mult (neg B) C) A). */
1942 if (GET_CODE (op1) == MULT
1943 && GET_CODE (op0) == NEG)
1945 rtx in1, in2;
1947 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1948 in2 = XEXP (op1, 1);
1949 return simplify_gen_binary (MINUS, mode,
1950 simplify_gen_binary (MULT, mode,
1951 in1, in2),
1952 XEXP (op0, 0));
1955 /* If one of the operands is a PLUS or a MINUS, see if we can
1956 simplify this by the associative law. This will, for example,
1957 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1958 Don't use the associative law for floating point.
1959 The inaccuracy makes it nonassociative,
1960 and subtle programs can break if operations are associated. */
1962 if (INTEGRAL_MODE_P (mode)
1963 && (plus_minus_operand_p (op0)
1964 || plus_minus_operand_p (op1))
1965 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1966 return tem;
1967 break;
1969 case MULT:
1970 if (trueop1 == constm1_rtx)
1971 return simplify_gen_unary (NEG, mode, op0, mode);
1973 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1974 x is NaN, since x * 0 is then also NaN. Nor is it valid
1975 when the mode has signed zeros, since multiplying a negative
1976 number by 0 will give -0, not 0. */
1977 if (!HONOR_NANS (mode)
1978 && !HONOR_SIGNED_ZEROS (mode)
1979 && trueop1 == CONST0_RTX (mode)
1980 && ! side_effects_p (op0))
1981 return op1;
1983 /* In IEEE floating point, x*1 is not equivalent to x for
1984 signalling NaNs. */
1985 if (!HONOR_SNANS (mode)
1986 && trueop1 == CONST1_RTX (mode))
1987 return op0;
1989 /* Convert multiply by constant power of two into shift unless
1990 we are still generating RTL. This test is a kludge. */
1991 if (GET_CODE (trueop1) == CONST_INT
1992 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1993 /* If the mode is larger than the host word size, and the
1994 uppermost bit is set, then this isn't a power of two due
1995 to implicit sign extension. */
1996 && (width <= HOST_BITS_PER_WIDE_INT
1997 || val != HOST_BITS_PER_WIDE_INT - 1))
1998 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
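/* For example, with trueop1 == (const_int 8), exact_log2 returns 3
   and x * 8 becomes x << 3.  (Illustrative power-of-two case.)  */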
2000 /* Likewise for multipliers wider than a word. */
2001 if (GET_CODE (trueop1) == CONST_DOUBLE
2002 && (GET_MODE (trueop1) == VOIDmode
2003 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2004 && GET_MODE (op0) == mode
2005 && CONST_DOUBLE_LOW (trueop1) == 0
2006 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2007 return simplify_gen_binary (ASHIFT, mode, op0,
2008 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2010 /* x*2 is x+x and x*(-1) is -x */
2011 if (GET_CODE (trueop1) == CONST_DOUBLE
2012 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2013 && GET_MODE (op0) == mode)
2015 REAL_VALUE_TYPE d;
2016 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2018 if (REAL_VALUES_EQUAL (d, dconst2))
2019 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2021 if (!HONOR_SNANS (mode)
2022 && REAL_VALUES_EQUAL (d, dconstm1))
2023 return simplify_gen_unary (NEG, mode, op0, mode);
2026 /* Optimize -x * -x as x * x. */
2027 if (FLOAT_MODE_P (mode)
2028 && GET_CODE (op0) == NEG
2029 && GET_CODE (op1) == NEG
2030 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2031 && !side_effects_p (XEXP (op0, 0)))
2032 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2035 if (SCALAR_FLOAT_MODE_P (mode)
2036 && GET_CODE (op0) == ABS
2037 && GET_CODE (op1) == ABS
2038 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2039 && !side_effects_p (XEXP (op0, 0)))
2040 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2042 /* Reassociate multiplication, but for floating point MULTs
2043 only when the user specifies unsafe math optimizations. */
2044 if (! FLOAT_MODE_P (mode)
2045 || flag_unsafe_math_optimizations)
2047 tem = simplify_associative_operation (code, mode, op0, op1);
2048 if (tem)
2049 return tem;
2051 break;
2053 case IOR:
2054 if (trueop1 == const0_rtx)
2055 return op0;
2056 if (GET_CODE (trueop1) == CONST_INT
2057 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2058 == GET_MODE_MASK (mode)))
2059 return op1;
2060 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2061 return op0;
2062 /* A | (~A) -> -1 */
2063 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2064 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2065 && ! side_effects_p (op0)
2066 && SCALAR_INT_MODE_P (mode))
2067 return constm1_rtx;
2069 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2070 if (GET_CODE (op1) == CONST_INT
2071 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2072 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2073 return op1;
2075 /* Convert (A & B) | A to A. */
2076 if (GET_CODE (op0) == AND
2077 && (rtx_equal_p (XEXP (op0, 0), op1)
2078 || rtx_equal_p (XEXP (op0, 1), op1))
2079 && ! side_effects_p (XEXP (op0, 0))
2080 && ! side_effects_p (XEXP (op0, 1)))
2081 return op1;
2083 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2084 mode size to (rotate A CX). */
2086 if (GET_CODE (op1) == ASHIFT
2087 || GET_CODE (op1) == SUBREG)
2089 opleft = op1;
2090 opright = op0;
2092 else
2094 opright = op1;
2095 opleft = op0;
2098 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2099 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2100 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2101 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2102 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2103 == GET_MODE_BITSIZE (mode)))
2104 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
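/* Illustration in SImode: (ior (ashift r (const_int 8))
   (lshiftrt r (const_int 24))) satisfies 8 + 24 == 32, the mode
   size, so it becomes (rotate r (const_int 8)).  Here r stands for
   some common operand.  */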
2106 /* Same, but for ashift that has been "simplified" to a wider mode
2107 by simplify_shift_const. */
2109 if (GET_CODE (opleft) == SUBREG
2110 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2111 && GET_CODE (opright) == LSHIFTRT
2112 && GET_CODE (XEXP (opright, 0)) == SUBREG
2113 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2114 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2115 && (GET_MODE_SIZE (GET_MODE (opleft))
2116 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2117 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2118 SUBREG_REG (XEXP (opright, 0)))
2119 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2120 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2121 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2122 == GET_MODE_BITSIZE (mode)))
2123 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2124 XEXP (SUBREG_REG (opleft), 1));
2126 /* If we have (ior (and X C1) C2), simplify this by making
2126 /* If we have (ior (and X C1) C2), simplify this by making
2127 C1 as small as possible if C1 actually changes. */
2128 if (GET_CODE (op1) == CONST_INT
2129 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2130 || INTVAL (op1) > 0)
2131 && GET_CODE (op0) == AND
2132 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2133 && GET_CODE (op1) == CONST_INT
2134 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2135 return simplify_gen_binary (IOR, mode,
2136 simplify_gen_binary
2137 (AND, mode, XEXP (op0, 0),
2138 GEN_INT (INTVAL (XEXP (op0, 1))
2139 & ~INTVAL (op1))),
2140 op1);
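/* Worked example with illustrative constants, in QImode:
   (ior (and x (const_int 0x0f)) (const_int 0x03)) becomes
   (ior (and x (const_int 0x0c)) (const_int 0x03)), since the bits
   of C1 already set in C2 contribute nothing to the IOR.  */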
2142 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2143 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2144 the PLUS does not affect any of the bits in OP1: then we can do
2145 the IOR as a PLUS and we can associate. This is valid if OP1
2146 can be safely shifted left C bits. */
2147 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2148 && GET_CODE (XEXP (op0, 0)) == PLUS
2149 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2150 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2151 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2153 int count = INTVAL (XEXP (op0, 1));
2154 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2156 if (mask >> count == INTVAL (trueop1)
2157 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2158 return simplify_gen_binary (ASHIFTRT, mode,
2159 plus_constant (XEXP (op0, 0), mask),
2160 XEXP (op0, 1));
2163 tem = simplify_associative_operation (code, mode, op0, op1);
2164 if (tem)
2165 return tem;
2166 break;
2168 case XOR:
2169 if (trueop1 == const0_rtx)
2170 return op0;
2171 if (GET_CODE (trueop1) == CONST_INT
2172 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2173 == GET_MODE_MASK (mode)))
2174 return simplify_gen_unary (NOT, mode, op0, mode);
2175 if (rtx_equal_p (trueop0, trueop1)
2176 && ! side_effects_p (op0)
2177 && GET_MODE_CLASS (mode) != MODE_CC)
2178 return CONST0_RTX (mode);
2180 /* Canonicalize XOR of the most significant bit to PLUS. */
2181 if ((GET_CODE (op1) == CONST_INT
2182 || GET_CODE (op1) == CONST_DOUBLE)
2183 && mode_signbit_p (mode, op1))
2184 return simplify_gen_binary (PLUS, mode, op0, op1);
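/* E.g. in SImode, (xor x (const_int 0x80000000)) becomes
   (plus x (const_int 0x80000000)): adding the sign bit changes only
   that bit and produces no carries into lower bits, so XOR and PLUS
   agree.  (Illustrative instance of the canonicalization above.)  */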
2185 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2186 if ((GET_CODE (op1) == CONST_INT
2187 || GET_CODE (op1) == CONST_DOUBLE)
2188 && GET_CODE (op0) == PLUS
2189 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2190 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2191 && mode_signbit_p (mode, XEXP (op0, 1)))
2192 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2193 simplify_gen_binary (XOR, mode, op1,
2194 XEXP (op0, 1)));
2196 /* If we are XORing two things that have no bits in common,
2197 convert them into an IOR. This helps to detect rotation encoded
2198 using those methods and possibly other simplifications. */
2200 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2201 && (nonzero_bits (op0, mode)
2202 & nonzero_bits (op1, mode)) == 0)
2203 return (simplify_gen_binary (IOR, mode, op0, op1));
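/* For instance, if nonzero_bits reports that op0 can only have bits
   0x0f set and op1 only 0xf0, no bit position can be set in both, so
   XOR and IOR compute the same value and the IOR form is used.  */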
2205 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2206 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2207 (NOT y). */
2209 int num_negated = 0;
2211 if (GET_CODE (op0) == NOT)
2212 num_negated++, op0 = XEXP (op0, 0);
2213 if (GET_CODE (op1) == NOT)
2214 num_negated++, op1 = XEXP (op1, 0);
2216 if (num_negated == 2)
2217 return simplify_gen_binary (XOR, mode, op0, op1);
2218 else if (num_negated == 1)
2219 return simplify_gen_unary (NOT, mode,
2220 simplify_gen_binary (XOR, mode, op0, op1),
2221 mode);
2224 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2225 correspond to a machine insn or result in further simplifications
2226 if B is a constant. */
2228 if (GET_CODE (op0) == AND
2229 && rtx_equal_p (XEXP (op0, 1), op1)
2230 && ! side_effects_p (op1))
2231 return simplify_gen_binary (AND, mode,
2232 simplify_gen_unary (NOT, mode,
2233 XEXP (op0, 0), mode),
2234 op1);
2236 else if (GET_CODE (op0) == AND
2237 && rtx_equal_p (XEXP (op0, 0), op1)
2238 && ! side_effects_p (op1))
2239 return simplify_gen_binary (AND, mode,
2240 simplify_gen_unary (NOT, mode,
2241 XEXP (op0, 1), mode),
2242 op1);
2244 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2245 comparison if STORE_FLAG_VALUE is 1. */
2246 if (STORE_FLAG_VALUE == 1
2247 && trueop1 == const1_rtx
2248 && COMPARISON_P (op0)
2249 && (reversed = reversed_comparison (op0, mode)))
2250 return reversed;
2252 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2253 is (lt foo (const_int 0)), so we can perform the above
2254 simplification if STORE_FLAG_VALUE is 1. */
2256 if (STORE_FLAG_VALUE == 1
2257 && trueop1 == const1_rtx
2258 && GET_CODE (op0) == LSHIFTRT
2259 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2260 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2261 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
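/* Sketch of the reasoning: in SImode, (lshiftrt x (const_int 31))
   extracts the sign bit, i.e. it is 1 exactly when x < 0, so XORing
   the result with 1 yields (ge x (const_int 0)).  */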
2263 /* (xor (comparison foo bar) (const_int sign-bit))
2264 when STORE_FLAG_VALUE is the sign bit. */
2265 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2266 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2267 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2268 && trueop1 == const_true_rtx
2269 && COMPARISON_P (op0)
2270 && (reversed = reversed_comparison (op0, mode)))
2271 return reversed;
2273 break;
2275 tem = simplify_associative_operation (code, mode, op0, op1);
2276 if (tem)
2277 return tem;
2278 break;
2280 case AND:
2281 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2282 return trueop1;
2283 /* If we are turning off bits already known off in OP0, we need
2284 not do an AND. */
2285 if (GET_CODE (trueop1) == CONST_INT
2286 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2287 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2288 return op0;
2289 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2290 && GET_MODE_CLASS (mode) != MODE_CC)
2291 return op0;
2292 /* A & (~A) -> 0 */
2293 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2294 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2295 && ! side_effects_p (op0)
2296 && GET_MODE_CLASS (mode) != MODE_CC)
2297 return CONST0_RTX (mode);
2299 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2300 there are no nonzero bits of C outside of X's mode. */
2301 if ((GET_CODE (op0) == SIGN_EXTEND
2302 || GET_CODE (op0) == ZERO_EXTEND)
2303 && GET_CODE (trueop1) == CONST_INT
2304 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2305 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2306 & INTVAL (trueop1)) == 0)
2308 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2309 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2310 gen_int_mode (INTVAL (trueop1),
2311 imode));
2312 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
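/* Illustration (QImode inner operand assumed):
   (and:SI (sign_extend:SI (reg:QI 60)) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI (reg:QI 60) (const_int 0x7f))), since 0x7f
   has no bits outside the QImode mask and the copies of the sign bit
   introduced by the extension are masked away anyway.  */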
2315 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2316 insn (and may simplify more). */
2317 if (GET_CODE (op0) == XOR
2318 && rtx_equal_p (XEXP (op0, 0), op1)
2319 && ! side_effects_p (op1))
2320 return simplify_gen_binary (AND, mode,
2321 simplify_gen_unary (NOT, mode,
2322 XEXP (op0, 1), mode),
2323 op1);
2325 if (GET_CODE (op0) == XOR
2326 && rtx_equal_p (XEXP (op0, 1), op1)
2327 && ! side_effects_p (op1))
2328 return simplify_gen_binary (AND, mode,
2329 simplify_gen_unary (NOT, mode,
2330 XEXP (op0, 0), mode),
2331 op1);
2333 /* Similarly for (~(A ^ B)) & A. */
2334 if (GET_CODE (op0) == NOT
2335 && GET_CODE (XEXP (op0, 0)) == XOR
2336 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2337 && ! side_effects_p (op1))
2338 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2340 if (GET_CODE (op0) == NOT
2341 && GET_CODE (XEXP (op0, 0)) == XOR
2342 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2343 && ! side_effects_p (op1))
2344 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2346 /* Convert (A | B) & A to A. */
2347 if (GET_CODE (op0) == IOR
2348 && (rtx_equal_p (XEXP (op0, 0), op1)
2349 || rtx_equal_p (XEXP (op0, 1), op1))
2350 && ! side_effects_p (XEXP (op0, 0))
2351 && ! side_effects_p (XEXP (op0, 1)))
2352 return op1;
2354 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2355 ((A & N) + B) & M -> (A + B) & M
2356 Similarly if (N & M) == 0,
2357 ((A | N) + B) & M -> (A + B) & M
2358 and for - instead of + and/or ^ instead of |. */
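/* Worked instance with illustrative constants: take M == 0xff
   (i.e. (1LL << 8) - 1) and N == 0x1ff, so (N & M) == M.  Then
   ((A & 0x1ff) + B) & 0xff simplifies to (A + B) & 0xff, because the
   bits of A cleared by the inner AND all lie above M and carries in
   the addition only propagate upward, never into the low 8 bits.  */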
2359 if (GET_CODE (trueop1) == CONST_INT
2360 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2361 && ~INTVAL (trueop1)
2362 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2363 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2365 rtx pmop[2];
2366 int which;
2368 pmop[0] = XEXP (op0, 0);
2369 pmop[1] = XEXP (op0, 1);
2371 for (which = 0; which < 2; which++)
2373 tem = pmop[which];
2374 switch (GET_CODE (tem))
2376 case AND:
2377 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2378 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2379 == INTVAL (trueop1))
2380 pmop[which] = XEXP (tem, 0);
2381 break;
2382 case IOR:
2383 case XOR:
2384 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2385 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2386 pmop[which] = XEXP (tem, 0);
2387 break;
2388 default:
2389 break;
2393 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2395 tem = simplify_gen_binary (GET_CODE (op0), mode,
2396 pmop[0], pmop[1]);
2397 return simplify_gen_binary (code, mode, tem, op1);
2400 tem = simplify_associative_operation (code, mode, op0, op1);
2401 if (tem)
2402 return tem;
2403 break;
2405 case UDIV:
2406 /* 0/x is 0 (or x&0 if x has side-effects). */
2407 if (trueop0 == CONST0_RTX (mode))
2409 if (side_effects_p (op1))
2410 return simplify_gen_binary (AND, mode, op1, trueop0);
2411 return trueop0;
2413 /* x/1 is x. */
2414 if (trueop1 == CONST1_RTX (mode))
2415 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2416 /* Convert divide by power of two into shift. */
2417 if (GET_CODE (trueop1) == CONST_INT
2418 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2419 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
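/* E.g. an unsigned divide x / 16 becomes x >> 4, since
   exact_log2 (16) == 4.  (Illustrative power-of-two case.)  */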
2420 break;
2422 case DIV:
2423 /* Handle floating point and integers separately. */
2424 if (SCALAR_FLOAT_MODE_P (mode))
2426 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2427 safe for modes with NaNs, since 0.0 / 0.0 will then be
2428 NaN rather than 0.0. Nor is it safe for modes with signed
2429 zeros, since dividing 0 by a negative number gives -0.0 */
2430 if (trueop0 == CONST0_RTX (mode)
2431 && !HONOR_NANS (mode)
2432 && !HONOR_SIGNED_ZEROS (mode)
2433 && ! side_effects_p (op1))
2434 return op0;
2435 /* x/1.0 is x. */
2436 if (trueop1 == CONST1_RTX (mode)
2437 && !HONOR_SNANS (mode))
2438 return op0;
2440 if (GET_CODE (trueop1) == CONST_DOUBLE
2441 && trueop1 != CONST0_RTX (mode))
2443 REAL_VALUE_TYPE d;
2444 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2446 /* x/-1.0 is -x. */
2447 if (REAL_VALUES_EQUAL (d, dconstm1)
2448 && !HONOR_SNANS (mode))
2449 return simplify_gen_unary (NEG, mode, op0, mode);
2451 /* Change FP division by a constant into multiplication.
2452 Only do this with -funsafe-math-optimizations. */
2453 if (flag_unsafe_math_optimizations
2454 && !REAL_VALUES_EQUAL (d, dconst0))
2456 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2457 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2458 return simplify_gen_binary (MULT, mode, op0, tem);
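/* Illustrative case: with -funsafe-math-optimizations, x / 4.0 is
   rewritten as x * 0.25.  In general the reciprocal of the divisor
   need not be exactly representable, which is why the transformation
   is gated on the unsafe-math flag.  */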
2462 else
2464 /* 0/x is 0 (or x&0 if x has side-effects). */
2465 if (trueop0 == CONST0_RTX (mode))
2467 if (side_effects_p (op1))
2468 return simplify_gen_binary (AND, mode, op1, trueop0);
2469 return trueop0;
2471 /* x/1 is x. */
2472 if (trueop1 == CONST1_RTX (mode))
2473 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2474 /* x/-1 is -x. */
2475 if (trueop1 == constm1_rtx)
2477 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2478 return simplify_gen_unary (NEG, mode, x, mode);
2481 break;
2483 case UMOD:
2484 /* 0%x is 0 (or x&0 if x has side-effects). */
2485 if (trueop0 == CONST0_RTX (mode))
2487 if (side_effects_p (op1))
2488 return simplify_gen_binary (AND, mode, op1, trueop0);
2489 return trueop0;
2491 /* x%1 is 0 (or x&0 if x has side-effects). */
2492 if (trueop1 == CONST1_RTX (mode))
2494 if (side_effects_p (op0))
2495 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2496 return CONST0_RTX (mode);
2498 /* Implement modulus by power of two as AND. */
2499 if (GET_CODE (trueop1) == CONST_INT
2500 && exact_log2 (INTVAL (trueop1)) > 0)
2501 return simplify_gen_binary (AND, mode, op0,
2502 GEN_INT (INTVAL (op1) - 1));
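/* For example, the unsigned modulus x % 8 becomes x & 7.
   (Illustrative; any power-of-two modulus is handled this way.)  */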
2503 break;
2505 case MOD:
2506 /* 0%x is 0 (or x&0 if x has side-effects). */
2507 if (trueop0 == CONST0_RTX (mode))
2509 if (side_effects_p (op1))
2510 return simplify_gen_binary (AND, mode, op1, trueop0);
2511 return trueop0;
2513 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2514 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2516 if (side_effects_p (op0))
2517 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2518 return CONST0_RTX (mode);
2520 break;
2522 case ROTATERT:
2523 case ROTATE:
2524 case ASHIFTRT:
2525 if (trueop1 == CONST0_RTX (mode))
2526 return op0;
2527 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2528 return op0;
2529 /* Rotating ~0 always results in ~0. */
2530 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2531 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2532 && ! side_effects_p (op1))
2533 return op0;
2534 break;
2536 case ASHIFT:
2537 case SS_ASHIFT:
2538 if (trueop1 == CONST0_RTX (mode))
2539 return op0;
2540 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2541 return op0;
2542 break;
2544 case LSHIFTRT:
2545 if (trueop1 == CONST0_RTX (mode))
2546 return op0;
2547 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2548 return op0;
2549 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2550 if (GET_CODE (op0) == CLZ
2551 && GET_CODE (trueop1) == CONST_INT
2552 && STORE_FLAG_VALUE == 1
2553 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2555 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2556 unsigned HOST_WIDE_INT zero_val = 0;
2558 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2559 && zero_val == GET_MODE_BITSIZE (imode)
2560 && INTVAL (trueop1) == exact_log2 (zero_val))
2561 return simplify_gen_relational (EQ, mode, imode,
2562 XEXP (op0, 0), const0_rtx);
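/* Sketch of why this works: if CLZ of zero is defined to be the
   SImode bit size (32) and the shift count is exact_log2 (32) == 5,
   then (clz x) >> 5 is 1 only for x == 0, since clz of any nonzero
   SImode value is at most 31.  With STORE_FLAG_VALUE == 1 that is
   exactly (eq x 0).  */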
2564 break;
2566 case SMIN:
2567 if (width <= HOST_BITS_PER_WIDE_INT
2568 && GET_CODE (trueop1) == CONST_INT
2569 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2570 && ! side_effects_p (op0))
2571 return op1;
2572 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2573 return op0;
2574 tem = simplify_associative_operation (code, mode, op0, op1);
2575 if (tem)
2576 return tem;
2577 break;
2579 case SMAX:
2580 if (width <= HOST_BITS_PER_WIDE_INT
2581 && GET_CODE (trueop1) == CONST_INT
2582 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2583 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2584 && ! side_effects_p (op0))
2585 return op1;
2586 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2587 return op0;
2588 tem = simplify_associative_operation (code, mode, op0, op1);
2589 if (tem)
2590 return tem;
2591 break;
2593 case UMIN:
2594 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2595 return op1;
2596 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2597 return op0;
2598 tem = simplify_associative_operation (code, mode, op0, op1);
2599 if (tem)
2600 return tem;
2601 break;
2603 case UMAX:
2604 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2605 return op1;
2606 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2607 return op0;
2608 tem = simplify_associative_operation (code, mode, op0, op1);
2609 if (tem)
2610 return tem;
2611 break;
2613 case SS_PLUS:
2614 case US_PLUS:
2615 case SS_MINUS:
2616 case US_MINUS:
2617 /* ??? There are simplifications that can be done. */
2618 return 0;
2620 case VEC_SELECT:
2621 if (!VECTOR_MODE_P (mode))
2623 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2624 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2625 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2626 gcc_assert (XVECLEN (trueop1, 0) == 1);
2627 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2629 if (GET_CODE (trueop0) == CONST_VECTOR)
2630 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2631 (trueop1, 0, 0)));
2633 else
2635 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2636 gcc_assert (GET_MODE_INNER (mode)
2637 == GET_MODE_INNER (GET_MODE (trueop0)));
2638 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2640 if (GET_CODE (trueop0) == CONST_VECTOR)
2642 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2643 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2644 rtvec v = rtvec_alloc (n_elts);
2645 unsigned int i;
2647 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2648 for (i = 0; i < n_elts; i++)
2650 rtx x = XVECEXP (trueop1, 0, i);
2652 gcc_assert (GET_CODE (x) == CONST_INT);
2653 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2654 INTVAL (x));
2657 return gen_rtx_CONST_VECTOR (mode, v);
2661 if (XVECLEN (trueop1, 0) == 1
2662 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2663 && GET_CODE (trueop0) == VEC_CONCAT)
2665 rtx vec = trueop0;
2666 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2668 /* Try to find the element in the VEC_CONCAT. */
2669 while (GET_MODE (vec) != mode
2670 && GET_CODE (vec) == VEC_CONCAT)
2672 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2673 if (offset < vec_size)
2674 vec = XEXP (vec, 0);
2675 else
2677 offset -= vec_size;
2678 vec = XEXP (vec, 1);
2680 vec = avoid_constant_pool_reference (vec);
2683 if (GET_MODE (vec) == mode)
2684 return vec;
2687 return 0;
2688 case VEC_CONCAT:
2690 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2691 ? GET_MODE (trueop0)
2692 : GET_MODE_INNER (mode));
2693 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2694 ? GET_MODE (trueop1)
2695 : GET_MODE_INNER (mode));
2697 gcc_assert (VECTOR_MODE_P (mode));
2698 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2699 == GET_MODE_SIZE (mode));
2701 if (VECTOR_MODE_P (op0_mode))
2702 gcc_assert (GET_MODE_INNER (mode)
2703 == GET_MODE_INNER (op0_mode));
2704 else
2705 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2707 if (VECTOR_MODE_P (op1_mode))
2708 gcc_assert (GET_MODE_INNER (mode)
2709 == GET_MODE_INNER (op1_mode));
2710 else
2711 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2713 if ((GET_CODE (trueop0) == CONST_VECTOR
2714 || GET_CODE (trueop0) == CONST_INT
2715 || GET_CODE (trueop0) == CONST_DOUBLE)
2716 && (GET_CODE (trueop1) == CONST_VECTOR
2717 || GET_CODE (trueop1) == CONST_INT
2718 || GET_CODE (trueop1) == CONST_DOUBLE))
2720 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2721 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2722 rtvec v = rtvec_alloc (n_elts);
2723 unsigned int i;
2724 unsigned in_n_elts = 1;
2726 if (VECTOR_MODE_P (op0_mode))
2727 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2728 for (i = 0; i < n_elts; i++)
2730 if (i < in_n_elts)
2732 if (!VECTOR_MODE_P (op0_mode))
2733 RTVEC_ELT (v, i) = trueop0;
2734 else
2735 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2737 else
2739 if (!VECTOR_MODE_P (op1_mode))
2740 RTVEC_ELT (v, i) = trueop1;
2741 else
2742 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2743 i - in_n_elts);
2747 return gen_rtx_CONST_VECTOR (mode, v);
2750 return 0;
2752 default:
2753 gcc_unreachable ();
2756 return 0;
rtx
2760 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2761 rtx op0, rtx op1)
2763 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2764 HOST_WIDE_INT val;
2765 unsigned int width = GET_MODE_BITSIZE (mode);
2767 if (VECTOR_MODE_P (mode)
2768 && code != VEC_CONCAT
2769 && GET_CODE (op0) == CONST_VECTOR
2770 && GET_CODE (op1) == CONST_VECTOR)
2772 unsigned n_elts = GET_MODE_NUNITS (mode);
2773 enum machine_mode op0mode = GET_MODE (op0);
2774 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2775 enum machine_mode op1mode = GET_MODE (op1);
2776 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2777 rtvec v = rtvec_alloc (n_elts);
2778 unsigned int i;
2780 gcc_assert (op0_n_elts == n_elts);
2781 gcc_assert (op1_n_elts == n_elts);
2782 for (i = 0; i < n_elts; i++)
2784 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2785 CONST_VECTOR_ELT (op0, i),
2786 CONST_VECTOR_ELT (op1, i));
2787 if (!x)
2788 return 0;
2789 RTVEC_ELT (v, i) = x;
2792 return gen_rtx_CONST_VECTOR (mode, v);
2795 if (VECTOR_MODE_P (mode)
2796 && code == VEC_CONCAT
2797 && CONSTANT_P (op0) && CONSTANT_P (op1))
2799 unsigned n_elts = GET_MODE_NUNITS (mode);
2800 rtvec v = rtvec_alloc (n_elts);
2802 gcc_assert (n_elts >= 2);
2803 if (n_elts == 2)
2805 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2806 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2808 RTVEC_ELT (v, 0) = op0;
2809 RTVEC_ELT (v, 1) = op1;
2811 else
2813 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2814 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2815 unsigned i;
2817 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2818 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2819 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2821 for (i = 0; i < op0_n_elts; ++i)
2822 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2823 for (i = 0; i < op1_n_elts; ++i)
2824 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2827 return gen_rtx_CONST_VECTOR (mode, v);
2830 if (SCALAR_FLOAT_MODE_P (mode)
2831 && GET_CODE (op0) == CONST_DOUBLE
2832 && GET_CODE (op1) == CONST_DOUBLE
2833 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2835 if (code == AND
2836 || code == IOR
2837 || code == XOR)
2839 long tmp0[4];
2840 long tmp1[4];
2841 REAL_VALUE_TYPE r;
2842 int i;
2844 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2845 GET_MODE (op0));
2846 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2847 GET_MODE (op1));
2848 for (i = 0; i < 4; i++)
2850 switch (code)
2852 case AND:
2853 tmp0[i] &= tmp1[i];
2854 break;
2855 case IOR:
2856 tmp0[i] |= tmp1[i];
2857 break;
2858 case XOR:
2859 tmp0[i] ^= tmp1[i];
2860 break;
2861 default:
2862 gcc_unreachable ();
2865 real_from_target (&r, tmp0, mode);
2866 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2868 else
2870 REAL_VALUE_TYPE f0, f1, value, result;
2871 bool inexact;
2873 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2874 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2875 real_convert (&f0, mode, &f0);
2876 real_convert (&f1, mode, &f1);
2878 if (HONOR_SNANS (mode)
2879 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2880 return 0;
2882 if (code == DIV
2883 && REAL_VALUES_EQUAL (f1, dconst0)
2884 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2885 return 0;
2887 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2888 && flag_trapping_math
2889 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2891 int s0 = REAL_VALUE_NEGATIVE (f0);
2892 int s1 = REAL_VALUE_NEGATIVE (f1);
2894 switch (code)
2896 case PLUS:
2897 /* Inf + -Inf = NaN plus exception. */
2898 if (s0 != s1)
2899 return 0;
2900 break;
2901 case MINUS:
2902 /* Inf - Inf = NaN plus exception. */
2903 if (s0 == s1)
2904 return 0;
2905 break;
2906 case DIV:
2907 /* Inf / Inf = NaN plus exception. */
2908 return 0;
2909 default:
2910 break;
2914 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2915 && flag_trapping_math
2916 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2917 || (REAL_VALUE_ISINF (f1)
2918 && REAL_VALUES_EQUAL (f0, dconst0))))
2919 /* Inf * 0 = NaN plus exception. */
2920 return 0;
2922 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2923 &f0, &f1);
2924 real_convert (&result, mode, &value);
2926 /* Don't constant fold this floating point operation if
2927 the result has overflowed and flag_trapping_math is set. */
2929 if (flag_trapping_math
2930 && MODE_HAS_INFINITIES (mode)
2931 && REAL_VALUE_ISINF (result)
2932 && !REAL_VALUE_ISINF (f0)
2933 && !REAL_VALUE_ISINF (f1))
2934 /* Overflow plus exception. */
2935 return 0;
2937 /* Don't constant fold this floating point operation if the
2938 result may depend upon the run-time rounding mode and
2939 flag_rounding_math is set, or if GCC's software emulation
2940 is unable to accurately represent the result. */
2942 if ((flag_rounding_math
2943 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2944 && !flag_unsafe_math_optimizations))
2945 && (inexact || !real_identical (&result, &value)))
2946 return NULL_RTX;
2948 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2952 /* We can fold some multi-word operations. */
2953 if (GET_MODE_CLASS (mode) == MODE_INT
2954 && width == HOST_BITS_PER_WIDE_INT * 2
2955 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2956 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2958 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2959 HOST_WIDE_INT h1, h2, hv, ht;
2961 if (GET_CODE (op0) == CONST_DOUBLE)
2962 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2963 else
2964 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2966 if (GET_CODE (op1) == CONST_DOUBLE)
2967 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2968 else
2969 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2971 switch (code)
2973 case MINUS:
2974 /* A - B == A + (-B). */
2975 neg_double (l2, h2, &lv, &hv);
2976 l2 = lv, h2 = hv;
2978 /* Fall through.... */
2980 case PLUS:
2981 add_double (l1, h1, l2, h2, &lv, &hv);
2982 break;
2984 case MULT:
2985 mul_double (l1, h1, l2, h2, &lv, &hv);
2986 break;
2988 case DIV:
2989 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2990 &lv, &hv, &lt, &ht))
2991 return 0;
2992 break;
2994 case MOD:
2995 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2996 &lt, &ht, &lv, &hv))
2997 return 0;
2998 break;
3000 case UDIV:
3001 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3002 &lv, &hv, &lt, &ht))
3003 return 0;
3004 break;
3006 case UMOD:
3007 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3008 &lt, &ht, &lv, &hv))
3009 return 0;
3010 break;
3012 case AND:
3013 lv = l1 & l2, hv = h1 & h2;
3014 break;
3016 case IOR:
3017 lv = l1 | l2, hv = h1 | h2;
3018 break;
3020 case XOR:
3021 lv = l1 ^ l2, hv = h1 ^ h2;
3022 break;
3024 case SMIN:
3025 if (h1 < h2
3026 || (h1 == h2
3027 && ((unsigned HOST_WIDE_INT) l1
3028 < (unsigned HOST_WIDE_INT) l2)))
3029 lv = l1, hv = h1;
3030 else
3031 lv = l2, hv = h2;
3032 break;
3034 case SMAX:
3035 if (h1 > h2
3036 || (h1 == h2
3037 && ((unsigned HOST_WIDE_INT) l1
3038 > (unsigned HOST_WIDE_INT) l2)))
3039 lv = l1, hv = h1;
3040 else
3041 lv = l2, hv = h2;
3042 break;
3044 case UMIN:
3045 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3046 || (h1 == h2
3047 && ((unsigned HOST_WIDE_INT) l1
3048 < (unsigned HOST_WIDE_INT) l2)))
3049 lv = l1, hv = h1;
3050 else
3051 lv = l2, hv = h2;
3052 break;
3054 case UMAX:
3055 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3056 || (h1 == h2
3057 && ((unsigned HOST_WIDE_INT) l1
3058 > (unsigned HOST_WIDE_INT) l2)))
3059 lv = l1, hv = h1;
3060 else
3061 lv = l2, hv = h2;
3062 break;
3064 case LSHIFTRT: case ASHIFTRT:
3065 case ASHIFT:
3066 case ROTATE: case ROTATERT:
3067 if (SHIFT_COUNT_TRUNCATED)
3068 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3070 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3071 return 0;
3073 if (code == LSHIFTRT || code == ASHIFTRT)
3074 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3075 code == ASHIFTRT);
3076 else if (code == ASHIFT)
3077 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3078 else if (code == ROTATE)
3079 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3080 else /* code == ROTATERT */
3081 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3082 break;
3084 default:
3085 return 0;
3088 return immed_double_const (lv, hv, mode);
3091 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3092 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3094 /* Get the integer argument values in two forms:
3095 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3097 arg0 = INTVAL (op0);
3098 arg1 = INTVAL (op1);
3100 if (width < HOST_BITS_PER_WIDE_INT)
3102 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3103 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3105 arg0s = arg0;
3106 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3107 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3109 arg1s = arg1;
3110 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3111 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3113 else
3115 arg0s = arg0;
3116 arg1s = arg1;
3119 /* Compute the value of the arithmetic. */
3121 switch (code)
3123 case PLUS:
3124 val = arg0s + arg1s;
3125 break;
3127 case MINUS:
3128 val = arg0s - arg1s;
3129 break;
3131 case MULT:
3132 val = arg0s * arg1s;
3133 break;
3135 case DIV:
3136 if (arg1s == 0
3137 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3138 && arg1s == -1))
3139 return 0;
3140 val = arg0s / arg1s;
3141 break;
3143 case MOD:
3144 if (arg1s == 0
3145 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3146 && arg1s == -1))
3147 return 0;
3148 val = arg0s % arg1s;
3149 break;
3151 case UDIV:
3152 if (arg1 == 0
3153 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3154 && arg1s == -1))
3155 return 0;
3156 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3157 break;
3159 case UMOD:
3160 if (arg1 == 0
3161 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3162 && arg1s == -1))
3163 return 0;
3164 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3165 break;
3167 case AND:
3168 val = arg0 & arg1;
3169 break;
3171 case IOR:
3172 val = arg0 | arg1;
3173 break;
3175 case XOR:
3176 val = arg0 ^ arg1;
3177 break;
3179 case LSHIFTRT:
3180 case ASHIFT:
3181 case ASHIFTRT:
3182 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3183 the value is in range. We can't return any old value for
3184 out-of-range arguments because either the middle-end (via
3185 shift_truncation_mask) or the back-end might be relying on
3186 target-specific knowledge. Nor can we rely on
3187 shift_truncation_mask, since the shift might not be part of an
3188 ashlM3, lshrM3 or ashrM3 instruction. */
3189 if (SHIFT_COUNT_TRUNCATED)
3190 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3191 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3192 return 0;
3194 val = (code == ASHIFT
3195 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3196 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3198 /* Sign-extend the result for arithmetic right shifts. */
3199 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3200 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3201 break;
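/* Worked example of the sign extension above (illustrative, width 8):
   for arg0s == -4 (arg0 == 0xfc) shifted right arithmetically by 1,
   the logical shift first yields 0x7e, and ORing in the high bits
   gives 0xfe, i.e. -2, the expected arithmetic result.  */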
3203 case ROTATERT:
3204 if (arg1 < 0)
3205 return 0;
3207 arg1 %= width;
3208 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3209 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3210 break;
3212 case ROTATE:
3213 if (arg1 < 0)
3214 return 0;
3216 arg1 %= width;
3217 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3218 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3219 break;
3221 case COMPARE:
3222 /* Do nothing here. */
3223 return 0;
3225 case SMIN:
3226 val = arg0s <= arg1s ? arg0s : arg1s;
3227 break;
3229 case UMIN:
3230 val = ((unsigned HOST_WIDE_INT) arg0
3231 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3232 break;
3234 case SMAX:
3235 val = arg0s > arg1s ? arg0s : arg1s;
3236 break;
3238 case UMAX:
3239 val = ((unsigned HOST_WIDE_INT) arg0
3240 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3241 break;
3243 case SS_PLUS:
3244 case US_PLUS:
3245 case SS_MINUS:
3246 case US_MINUS:
3247 case SS_ASHIFT:
3248 /* ??? There are simplifications that can be done. */
3249 return 0;
3251 default:
3252 gcc_unreachable ();
3255 return gen_int_mode (val, mode);
3258 return NULL_RTX;
3263 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3264 PLUS or MINUS.
3266 Rather than test for specific cases, we do this by a brute-force method
3267 and do all possible simplifications until no more changes occur. Then
3268 we rebuild the operation. */
3270 struct simplify_plus_minus_op_data
3272 rtx op;
3273 short neg;
3276 static int
3277 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3279 const struct simplify_plus_minus_op_data *d1 = p1;
3280 const struct simplify_plus_minus_op_data *d2 = p2;
3281 int result;
3283 result = (commutative_operand_precedence (d2->op)
3284 - commutative_operand_precedence (d1->op));
3285 if (result)
3286 return result;
3288 /* Group together equal REGs to do more simplification. */
3289 if (REG_P (d1->op) && REG_P (d2->op))
3290 return REGNO (d1->op) - REGNO (d2->op);
3291 else
3292 return 0;
3295 static rtx
3296 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3297 rtx op1)
3299 struct simplify_plus_minus_op_data ops[8];
3300 rtx result, tem;
3301 int n_ops = 2, input_ops = 2;
3302 int changed, n_constants = 0, canonicalized = 0;
3303 int i, j;
3305 memset (ops, 0, sizeof ops);
3307 /* Set up the two operands and then expand them until nothing has been
3308 changed. If we run out of room in our array, give up; this should
3309 almost never happen. */
3311 ops[0].op = op0;
3312 ops[0].neg = 0;
3313 ops[1].op = op1;
3314 ops[1].neg = (code == MINUS);
3318 changed = 0;
3320 for (i = 0; i < n_ops; i++)
3322 rtx this_op = ops[i].op;
3323 int this_neg = ops[i].neg;
3324 enum rtx_code this_code = GET_CODE (this_op);
3326 switch (this_code)
3328 case PLUS:
3329 case MINUS:
3330 if (n_ops == 7)
3331 return NULL_RTX;
3333 ops[n_ops].op = XEXP (this_op, 1);
3334 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3335 n_ops++;
3337 ops[i].op = XEXP (this_op, 0);
3338 input_ops++;
3339 changed = 1;
3340 canonicalized |= this_neg;
3341 break;
3343 case NEG:
3344 ops[i].op = XEXP (this_op, 0);
3345 ops[i].neg = ! this_neg;
3346 changed = 1;
3347 canonicalized = 1;
3348 break;
3350 case CONST:
3351 if (n_ops < 7
3352 && GET_CODE (XEXP (this_op, 0)) == PLUS
3353 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3354 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3356 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3357 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3358 ops[n_ops].neg = this_neg;
3359 n_ops++;
3360 changed = 1;
3361 canonicalized = 1;
3363 break;
3365 case NOT:
3366 /* ~a -> (-a - 1) */
3367 if (n_ops != 7)
3369 ops[n_ops].op = constm1_rtx;
3370 ops[n_ops++].neg = this_neg;
3371 ops[i].op = XEXP (this_op, 0);
3372 ops[i].neg = !this_neg;
3373 changed = 1;
3374 canonicalized = 1;
3376 break;
3378 case CONST_INT:
3379 n_constants++;
3380 if (this_neg)
3382 ops[i].op = neg_const_int (mode, this_op);
3383 ops[i].neg = 0;
3384 changed = 1;
3385 canonicalized = 1;
3387 break;
3389 default:
3390 break;
3394 while (changed);
3396 if (n_constants > 1)
3397 canonicalized = 1;
3399 gcc_assert (n_ops >= 2);
3401 /* If we only have two operands, we can avoid the loops. */
3402 if (n_ops == 2)
3404 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3405 rtx lhs, rhs;
3407 /* Get the two operands. Be careful with the order, especially for
3408 the cases where code == MINUS. */
3409 if (ops[0].neg && ops[1].neg)
3411 lhs = gen_rtx_NEG (mode, ops[0].op);
3412 rhs = ops[1].op;
3414 else if (ops[0].neg)
3416 lhs = ops[1].op;
3417 rhs = ops[0].op;
3419 else
3421 lhs = ops[0].op;
3422 rhs = ops[1].op;
3425 return simplify_const_binary_operation (code, mode, lhs, rhs);
3428 /* Now simplify each pair of operands until nothing changes. */
3431 /* Insertion sort is good enough for an eight-element array. */
3432 for (i = 1; i < n_ops; i++)
3434 struct simplify_plus_minus_op_data save;
3435 j = i - 1;
3436 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3437 continue;
3439 canonicalized = 1;
3440 save = ops[i];
3442 ops[j + 1] = ops[j];
3443 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3444 ops[j + 1] = save;
3447 /* This is only useful the first time through. */
3448 if (!canonicalized)
3449 return NULL_RTX;
3451 changed = 0;
3452 for (i = n_ops - 1; i > 0; i--)
3453 for (j = i - 1; j >= 0; j--)
3455 rtx lhs = ops[j].op, rhs = ops[i].op;
3456 int lneg = ops[j].neg, rneg = ops[i].neg;
3458 if (lhs != 0 && rhs != 0)
3460 enum rtx_code ncode = PLUS;
3462 if (lneg != rneg)
3464 ncode = MINUS;
3465 if (lneg)
3466 tem = lhs, lhs = rhs, rhs = tem;
3468 else if (swap_commutative_operands_p (lhs, rhs))
3469 tem = lhs, lhs = rhs, rhs = tem;
3471 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3472 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3474 rtx tem_lhs, tem_rhs;
3476 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3477 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3478 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3480 if (tem && !CONSTANT_P (tem))
3481 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3483 else
3484 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3486 /* Reject "simplifications" that just wrap the two
3487 arguments in a CONST. Failure to do so can result
3488 in infinite recursion with simplify_binary_operation
3489 when it calls us to simplify CONST operations. */
3490 if (tem
3491 && ! (GET_CODE (tem) == CONST
3492 && GET_CODE (XEXP (tem, 0)) == ncode
3493 && XEXP (XEXP (tem, 0), 0) == lhs
3494 && XEXP (XEXP (tem, 0), 1) == rhs))
3496 lneg &= rneg;
3497 if (GET_CODE (tem) == NEG)
3498 tem = XEXP (tem, 0), lneg = !lneg;
3499 if (GET_CODE (tem) == CONST_INT && lneg)
3500 tem = neg_const_int (mode, tem), lneg = 0;
3502 ops[i].op = tem;
3503 ops[i].neg = lneg;
3504 ops[j].op = NULL_RTX;
3505 changed = 1;
3510 /* Pack all the operands to the lower-numbered entries. */
3511 for (i = 0, j = 0; j < n_ops; j++)
3512 if (ops[j].op)
3514 ops[i] = ops[j];
3515 i++;
3517 n_ops = i;
3519 while (changed);
3521 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3522 if (n_ops == 2
3523 && GET_CODE (ops[1].op) == CONST_INT
3524 && CONSTANT_P (ops[0].op)
3525 && ops[0].neg)
3526 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3528 /* We suppressed creation of trivial CONST expressions in the
3529 combination loop to avoid recursion. Create one manually now.
3530 The combination loop should have ensured that there is exactly
3531 one CONST_INT, and the sort will have ensured that it is last
3532 in the array and that any other constant will be next-to-last. */
3534 if (n_ops > 1
3535 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3536 && CONSTANT_P (ops[n_ops - 2].op))
3538 rtx value = ops[n_ops - 1].op;
3539 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3540 value = neg_const_int (mode, value);
3541 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3542 n_ops--;
3545 /* Put a non-negated operand first, if possible. */
3547 for (i = 0; i < n_ops && ops[i].neg; i++)
3548 continue;
3549 if (i == n_ops)
3550 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3551 else if (i != 0)
3553 tem = ops[0].op;
3554 ops[0] = ops[i];
3555 ops[i].op = tem;
3556 ops[i].neg = 1;
3559 /* Now make the result by performing the requested operations. */
3560 result = ops[0].op;
3561 for (i = 1; i < n_ops; i++)
3562 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3563 mode, result, ops[i].op);
3565 return result;
3568 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3569 static bool
3570 plus_minus_operand_p (rtx x)
3572 return GET_CODE (x) == PLUS
3573 || GET_CODE (x) == MINUS
3574 || (GET_CODE (x) == CONST
3575 && GET_CODE (XEXP (x, 0)) == PLUS
3576 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3577 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3580 /* Like simplify_binary_operation except used for relational operators.
3581 MODE is the mode of the result. If MODE is VOIDmode, the two operands
3582 must not also both be VOIDmode.
3584 CMP_MODE specifies the mode in which the comparison is done, so it is
3585 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3586 the operands or, if both are VOIDmode, the operands are compared in
3587 "infinite precision". */
rtx
3589 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3590 enum machine_mode cmp_mode, rtx op0, rtx op1)
3592 rtx tem, trueop0, trueop1;
3594 if (cmp_mode == VOIDmode)
3595 cmp_mode = GET_MODE (op0);
3596 if (cmp_mode == VOIDmode)
3597 cmp_mode = GET_MODE (op1);
3599 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3600 if (tem)
3602 if (SCALAR_FLOAT_MODE_P (mode))
3604 if (tem == const0_rtx)
3605 return CONST0_RTX (mode);
3606 #ifdef FLOAT_STORE_FLAG_VALUE
3608 REAL_VALUE_TYPE val;
3609 val = FLOAT_STORE_FLAG_VALUE (mode);
3610 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3612 #else
3613 return NULL_RTX;
3614 #endif
3616 if (VECTOR_MODE_P (mode))
3618 if (tem == const0_rtx)
3619 return CONST0_RTX (mode);
3620 #ifdef VECTOR_STORE_FLAG_VALUE
3622 int i, units;
3623 rtvec v;
3625 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3626 if (val == NULL_RTX)
3627 return NULL_RTX;
3628 if (val == const1_rtx)
3629 return CONST1_RTX (mode);
3631 units = GET_MODE_NUNITS (mode);
3632 v = rtvec_alloc (units);
3633 for (i = 0; i < units; i++)
3634 RTVEC_ELT (v, i) = val;
3635 return gen_rtx_raw_CONST_VECTOR (mode, v);
3637 #else
3638 return NULL_RTX;
3639 #endif
3642 return tem;
3645 /* For the following tests, ensure const0_rtx is op1. */
3646 if (swap_commutative_operands_p (op0, op1)
3647 || (op0 == const0_rtx && op1 != const0_rtx))
3648 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3650 /* If op0 is a compare, extract the comparison arguments from it. */
3651 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3652 return simplify_relational_operation (code, mode, VOIDmode,
3653 XEXP (op0, 0), XEXP (op0, 1));
3655 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3656 || CC0_P (op0))
3657 return NULL_RTX;
3659 trueop0 = avoid_constant_pool_reference (op0);
3660 trueop1 = avoid_constant_pool_reference (op1);
3661 return simplify_relational_operation_1 (code, mode, cmp_mode,
3662 trueop0, trueop1);
3665 /* This part of simplify_relational_operation is only used when CMP_MODE
3666 is not in class MODE_CC (i.e. it is a real comparison).
3668 MODE is the mode of the result, while CMP_MODE specifies the mode in
3669 which the comparison is done, so it is the mode of the operands. */
3671 static rtx
3672 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3673 enum machine_mode cmp_mode, rtx op0, rtx op1)
3675 enum rtx_code op0code = GET_CODE (op0);
3677 if (op1 == const0_rtx && COMPARISON_P (op0))
3679 /* If op0 is a comparison, extract the comparison arguments
3680 from it. */
3681 if (code == NE)
3683 if (GET_MODE (op0) == mode)
3684 return simplify_rtx (op0);
3685 else
3686 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3687 XEXP (op0, 0), XEXP (op0, 1));
3689 else if (code == EQ)
3691 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3692 if (new_code != UNKNOWN)
3693 return simplify_gen_relational (new_code, mode, VOIDmode,
3694 XEXP (op0, 0), XEXP (op0, 1));
3698 if (op1 == const0_rtx)
3700 /* Canonicalize (GTU x 0) as (NE x 0). */
3701 if (code == GTU)
3702 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3703 /* Canonicalize (LEU x 0) as (EQ x 0). */
3704 if (code == LEU)
3705 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3707 else if (op1 == const1_rtx)
3709 switch (code)
3711 case GE:
3712 /* Canonicalize (GE x 1) as (GT x 0). */
3713 return simplify_gen_relational (GT, mode, cmp_mode,
3714 op0, const0_rtx);
3715 case GEU:
3716 /* Canonicalize (GEU x 1) as (NE x 0). */
3717 return simplify_gen_relational (NE, mode, cmp_mode,
3718 op0, const0_rtx);
3719 case LT:
3720 /* Canonicalize (LT x 1) as (LE x 0). */
3721 return simplify_gen_relational (LE, mode, cmp_mode,
3722 op0, const0_rtx);
3723 case LTU:
3724 /* Canonicalize (LTU x 1) as (EQ x 0). */
3725 return simplify_gen_relational (EQ, mode, cmp_mode,
3726 op0, const0_rtx);
3727 default:
3728 break;
3731 else if (op1 == constm1_rtx)
3733 /* Canonicalize (LE x -1) as (LT x 0). */
3734 if (code == LE)
3735 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3736 /* Canonicalize (GT x -1) as (GE x 0). */
3737 if (code == GT)
3738 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3741 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3742 if ((code == EQ || code == NE)
3743 && (op0code == PLUS || op0code == MINUS)
3744 && CONSTANT_P (op1)
3745 && CONSTANT_P (XEXP (op0, 1))
3746 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3748 rtx x = XEXP (op0, 0);
3749 rtx c = XEXP (op0, 1);
3751 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3752 cmp_mode, op1, c);
3753 return simplify_gen_relational (code, mode, cmp_mode, x, c);
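/* For instance, (eq (plus x (const_int 3)) (const_int 10)) becomes
   (eq x (const_int 7)); the constant is simply moved to the other
   side of the comparison.  (Illustrative constants.)  */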
3756 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3757 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3758 if (code == NE
3759 && op1 == const0_rtx
3760 && GET_MODE_CLASS (mode) == MODE_INT
3761 && cmp_mode != VOIDmode
3762 /* ??? Work-around BImode bugs in the ia64 backend. */
3763 && mode != BImode
3764 && cmp_mode != BImode
3765 && nonzero_bits (op0, cmp_mode) == 1
3766 && STORE_FLAG_VALUE == 1)
3767 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3768 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3769 : lowpart_subreg (mode, op0, cmp_mode);
3771 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3772 if ((code == EQ || code == NE)
3773 && op1 == const0_rtx
3774 && op0code == XOR)
3775 return simplify_gen_relational (code, mode, cmp_mode,
3776 XEXP (op0, 0), XEXP (op0, 1));
3778 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3779 if ((code == EQ || code == NE)
3780 && op0code == XOR
3781 && rtx_equal_p (XEXP (op0, 0), op1)
3782 && !side_effects_p (XEXP (op0, 0)))
3783 return simplify_gen_relational (code, mode, cmp_mode,
3784 XEXP (op0, 1), const0_rtx);
3786 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3787 if ((code == EQ || code == NE)
3788 && op0code == XOR
3789 && rtx_equal_p (XEXP (op0, 1), op1)
3790 && !side_effects_p (XEXP (op0, 1)))
3791 return simplify_gen_relational (code, mode, cmp_mode,
3792 XEXP (op0, 0), const0_rtx);
3794 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3795 if ((code == EQ || code == NE)
3796 && op0code == XOR
3797 && (GET_CODE (op1) == CONST_INT
3798 || GET_CODE (op1) == CONST_DOUBLE)
3799 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3800 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3801 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3802 simplify_gen_binary (XOR, cmp_mode,
3803 XEXP (op0, 1), op1));
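/* E.g. (eq (xor x (const_int 5)) (const_int 12)) becomes
   (eq x (const_int 9)), since 5 ^ 12 == 9.  (Illustrative
   constants.)  */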
3805 if (op0code == POPCOUNT && op1 == const0_rtx)
3806 switch (code)
3808 case EQ:
3809 case LE:
3810 case LEU:
3811 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3812 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3813 XEXP (op0, 0), const0_rtx);
3815 case NE:
3816 case GT:
3817 case GTU:
3818 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3819 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3820 XEXP (op0, 0), const0_rtx);
3822 default:
3823 break;
3826 return NULL_RTX;
3829 /* Check if the given comparison (done in the given MODE) is actually a
3830 tautology or a contradiction.
3831 If no simplification is possible, this function returns zero.
3832 Otherwise, it returns either const_true_rtx or const0_rtx. */
rtx
3835 simplify_const_relational_operation (enum rtx_code code,
3836 enum machine_mode mode,
3837 rtx op0, rtx op1)
3839 int equal, op0lt, op0ltu, op1lt, op1ltu;
3840 rtx tem;
3841 rtx trueop0;
3842 rtx trueop1;
3844 gcc_assert (mode != VOIDmode
3845 || (GET_MODE (op0) == VOIDmode
3846 && GET_MODE (op1) == VOIDmode));
3848 /* If op0 is a compare, extract the comparison arguments from it. */
3849 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3851 op1 = XEXP (op0, 1);
3852 op0 = XEXP (op0, 0);
3854 if (GET_MODE (op0) != VOIDmode)
3855 mode = GET_MODE (op0);
3856 else if (GET_MODE (op1) != VOIDmode)
3857 mode = GET_MODE (op1);
3858 else
3859 return 0;
3862 /* We can't simplify MODE_CC values since we don't know what the
3863 actual comparison is. */
3864 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3865 return 0;
3867 /* Make sure the constant is second. */
3868 if (swap_commutative_operands_p (op0, op1))
3870 tem = op0, op0 = op1, op1 = tem;
3871 code = swap_condition (code);
3874 trueop0 = avoid_constant_pool_reference (op0);
3875 trueop1 = avoid_constant_pool_reference (op1);
3877 /* For integer comparisons of A and B maybe we can simplify A - B and can
3878 then simplify a comparison of that with zero. If A and B are both either
3879 a register or a CONST_INT, this can't help; testing for these cases will
3880 prevent infinite recursion here and speed things up.
3882 We can only do this for EQ and NE comparisons, as otherwise we may
3883 lose or introduce overflow which we cannot disregard as undefined,
3884 because we do not know the signedness of the operation on either the
3885 left or the right hand side of the comparison. */
3887 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3888 && (code == EQ || code == NE)
3889 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3890 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3891 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3892 /* We cannot do this if tem is a nonzero address. */
3893 && ! nonzero_address_p (tem))
3894 return simplify_const_relational_operation (signed_condition (code),
3895 mode, tem, const0_rtx);
3897 if (! HONOR_NANS (mode) && code == ORDERED)
3898 return const_true_rtx;
3900 if (! HONOR_NANS (mode) && code == UNORDERED)
3901 return const0_rtx;
3903 /* For modes without NaNs, if the two operands are equal, we know the
3904 result except if they have side-effects. */
3905 if (! HONOR_NANS (GET_MODE (trueop0))
3906 && rtx_equal_p (trueop0, trueop1)
3907 && ! side_effects_p (trueop0))
3908 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3910 /* If the operands are floating-point constants, see if we can fold
3911 the result. */
3912 else if (GET_CODE (trueop0) == CONST_DOUBLE
3913 && GET_CODE (trueop1) == CONST_DOUBLE
3914 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3916 REAL_VALUE_TYPE d0, d1;
3918 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3919 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3921 /* Comparisons are unordered iff at least one of the values is NaN. */
3922 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3923 switch (code)
3925 case UNEQ:
3926 case UNLT:
3927 case UNGT:
3928 case UNLE:
3929 case UNGE:
3930 case NE:
3931 case UNORDERED:
3932 return const_true_rtx;
3933 case EQ:
3934 case LT:
3935 case GT:
3936 case LE:
3937 case GE:
3938 case LTGT:
3939 case ORDERED:
3940 return const0_rtx;
3941 default:
3942 return 0;
3945 equal = REAL_VALUES_EQUAL (d0, d1);
3946 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3947 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3950 /* Otherwise, see if the operands are both integers. */
3951 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3952 && (GET_CODE (trueop0) == CONST_DOUBLE
3953 || GET_CODE (trueop0) == CONST_INT)
3954 && (GET_CODE (trueop1) == CONST_DOUBLE
3955 || GET_CODE (trueop1) == CONST_INT))
3957 int width = GET_MODE_BITSIZE (mode);
3958 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3959 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3961 /* Get the two words comprising each integer constant. */
3962 if (GET_CODE (trueop0) == CONST_DOUBLE)
3964 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3965 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3967 else
3969 l0u = l0s = INTVAL (trueop0);
3970 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3973 if (GET_CODE (trueop1) == CONST_DOUBLE)
3975 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3976 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3978 else
3980 l1u = l1s = INTVAL (trueop1);
3981 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3984 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3985 we have to sign or zero-extend the values. */
3986 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3988 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3989 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3991 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3992 l0s |= ((HOST_WIDE_INT) (-1) << width);
3994 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3995 l1s |= ((HOST_WIDE_INT) (-1) << width);
3997 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3998 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4000 equal = (h0u == h1u && l0u == l1u);
4001 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4002 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4003 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4004 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
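/* For example, in QImode (width 8) the operands (const_int -1) and
(const_int 255) denote the same eight-bit pattern: after the masking
and sign extension above, l0u == l1u == 0xff and l0s == l1s == -1, so
EQUAL is set even though the host values of the two constants differ. */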
4007 /* Otherwise, there are some code-specific tests we can make. */
4008 else
4010 /* Optimize comparisons with upper and lower bounds. */
4011 if (SCALAR_INT_MODE_P (mode)
4012 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4014 rtx mmin, mmax;
4015 int sign;
4017 if (code == GEU
4018 || code == LEU
4019 || code == GTU
4020 || code == LTU)
4021 sign = 0;
4022 else
4023 sign = 1;
4025 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4027 tem = NULL_RTX;
4028 switch (code)
4030 case GEU:
4031 case GE:
4032 /* x >= min is always true. */
4033 if (rtx_equal_p (trueop1, mmin))
4034 tem = const_true_rtx;
4035 else
4036 break;
4038 case LEU:
4039 case LE:
4040 /* x <= max is always true. */
4041 if (rtx_equal_p (trueop1, mmax))
4042 tem = const_true_rtx;
4043 break;
4045 case GTU:
4046 case GT:
4047 /* x > max is always false. */
4048 if (rtx_equal_p (trueop1, mmax))
4049 tem = const0_rtx;
4050 break;
4052 case LTU:
4053 case LT:
4054 /* x < min is always false. */
4055 if (rtx_equal_p (trueop1, mmin))
4056 tem = const0_rtx;
4057 break;
4059 default:
4060 break;
4062 if (tem == const0_rtx
4063 || tem == const_true_rtx)
4064 return tem;
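/* A small worked instance of the bound checks above: in QImode the
signed bounds are mmin == -128 and mmax == 127, so
(gt:QI x (const_int 127)) folds to const0_rtx and
(ge:QI x (const_int -128)) folds to const_true_rtx. */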
4067 switch (code)
4069 case EQ:
4070 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4071 return const0_rtx;
4072 break;
4074 case NE:
4075 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4076 return const_true_rtx;
4077 break;
4079 case LT:
4080 /* Optimize abs(x) < 0.0. */
4081 if (trueop1 == CONST0_RTX (mode)
4082 && !HONOR_SNANS (mode)
4083 && (!INTEGRAL_MODE_P (mode)
4084 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4086 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4087 : trueop0;
4088 if (GET_CODE (tem) == ABS)
4089 return const0_rtx;
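/* The INTEGRAL_MODE_P guard above is why the integer form of this fold
also requires flag_strict_overflow: with -fwrapv, (abs:M x) of the most
negative value wraps back to itself and really is less than zero, and
-ftrapv likewise gives signed overflow a defined (trapping) meaning, so
the fold is only safe when signed overflow is treated as undefined. */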
4092 /* Optimize popcount (x) < 0: a bit count is never negative. */
4093 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4094 return const0_rtx;
4095 break;
4097 case GE:
4098 /* Optimize abs(x) >= 0.0. */
4099 if (trueop1 == CONST0_RTX (mode)
4100 && !HONOR_NANS (mode)
4101 && (!INTEGRAL_MODE_P (mode)
4102 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4104 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4105 : trueop0;
4106 if (GET_CODE (tem) == ABS)
4107 return const_true_rtx;
4110 /* Optimize popcount (x) >= 0. */
4111 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4112 return const_true_rtx;
4113 break;
4115 case UNGE:
4116 /* Optimize ! (abs(x) < 0.0). */
4117 if (trueop1 == CONST0_RTX (mode))
4119 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4120 : trueop0;
4121 if (GET_CODE (tem) == ABS)
4122 return const_true_rtx;
4124 break;
4126 default:
4127 break;
4130 return 0;
4133 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4134 as appropriate. */
4135 switch (code)
4137 case EQ:
4138 case UNEQ:
4139 return equal ? const_true_rtx : const0_rtx;
4140 case NE:
4141 case LTGT:
4142 return ! equal ? const_true_rtx : const0_rtx;
4143 case LT:
4144 case UNLT:
4145 return op0lt ? const_true_rtx : const0_rtx;
4146 case GT:
4147 case UNGT:
4148 return op1lt ? const_true_rtx : const0_rtx;
4149 case LTU:
4150 return op0ltu ? const_true_rtx : const0_rtx;
4151 case GTU:
4152 return op1ltu ? const_true_rtx : const0_rtx;
4153 case LE:
4154 case UNLE:
4155 return equal || op0lt ? const_true_rtx : const0_rtx;
4156 case GE:
4157 case UNGE:
4158 return equal || op1lt ? const_true_rtx : const0_rtx;
4159 case LEU:
4160 return equal || op0ltu ? const_true_rtx : const0_rtx;
4161 case GEU:
4162 return equal || op1ltu ? const_true_rtx : const0_rtx;
4163 case ORDERED:
4164 return const_true_rtx;
4165 case UNORDERED:
4166 return const0_rtx;
4167 default:
4168 gcc_unreachable ();
4172 /* Simplify CODE, an operation with result mode MODE and three operands,
4173 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4174 a constant. Return 0 if no simplification is possible. */
4177 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4178 enum machine_mode op0_mode, rtx op0, rtx op1,
4179 rtx op2)
4181 unsigned int width = GET_MODE_BITSIZE (mode);
4183 /* VOIDmode means "infinite" precision. */
4184 if (width == 0)
4185 width = HOST_BITS_PER_WIDE_INT;
4187 switch (code)
4189 case SIGN_EXTRACT:
4190 case ZERO_EXTRACT:
4191 if (GET_CODE (op0) == CONST_INT
4192 && GET_CODE (op1) == CONST_INT
4193 && GET_CODE (op2) == CONST_INT
4194 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4195 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4197 /* Extracting a bit-field from a constant */
4198 HOST_WIDE_INT val = INTVAL (op0);
4200 if (BITS_BIG_ENDIAN)
4201 val >>= (GET_MODE_BITSIZE (op0_mode)
4202 - INTVAL (op2) - INTVAL (op1));
4203 else
4204 val >>= INTVAL (op2);
4206 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4208 /* First zero-extend. */
4209 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4210 /* If desired, propagate sign bit. */
4211 if (code == SIGN_EXTRACT
4212 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4213 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4216 /* Clear the bits that don't belong in our mode,
4217 unless they and our sign bit are all one.
4218 So we get either a reasonable negative value or a reasonable
4219 unsigned value for this mode. */
4220 if (width < HOST_BITS_PER_WIDE_INT
4221 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4222 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4223 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4225 return gen_int_mode (val, mode);
4227 break;
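/* Worked example, assuming !BITS_BIG_ENDIAN:
(zero_extract:SI (const_int 0x123) (const_int 4) (const_int 4))
shifts the value right by the bit position 4 to get 0x12, masks it
with (1 << 4) - 1, and returns (const_int 2). */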
4229 case IF_THEN_ELSE:
4230 if (GET_CODE (op0) == CONST_INT)
4231 return op0 != const0_rtx ? op1 : op2;
4233 /* Convert c ? a : a into "a". */
4234 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4235 return op1;
4237 /* Convert a != b ? a : b into "a". */
4238 if (GET_CODE (op0) == NE
4239 && ! side_effects_p (op0)
4240 && ! HONOR_NANS (mode)
4241 && ! HONOR_SIGNED_ZEROS (mode)
4242 && ((rtx_equal_p (XEXP (op0, 0), op1)
4243 && rtx_equal_p (XEXP (op0, 1), op2))
4244 || (rtx_equal_p (XEXP (op0, 0), op2)
4245 && rtx_equal_p (XEXP (op0, 1), op1))))
4246 return op1;
4248 /* Convert a == b ? a : b into "b". */
4249 if (GET_CODE (op0) == EQ
4250 && ! side_effects_p (op0)
4251 && ! HONOR_NANS (mode)
4252 && ! HONOR_SIGNED_ZEROS (mode)
4253 && ((rtx_equal_p (XEXP (op0, 0), op1)
4254 && rtx_equal_p (XEXP (op0, 1), op2))
4255 || (rtx_equal_p (XEXP (op0, 0), op2)
4256 && rtx_equal_p (XEXP (op0, 1), op1))))
4257 return op2;
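/* The HONOR_SIGNED_ZEROS check above matters because, for instance,
with op1 == +0.0 and op2 == -0.0 the comparison op1 == op2 is true,
yet folding the IF_THEN_ELSE to op2 rather than op1 would change the
sign of the zero that is produced. */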
4259 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4261 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4262 ? GET_MODE (XEXP (op0, 1))
4263 : GET_MODE (XEXP (op0, 0)));
4264 rtx temp;
4266 /* Look for happy constants in op1 and op2. */
4267 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4269 HOST_WIDE_INT t = INTVAL (op1);
4270 HOST_WIDE_INT f = INTVAL (op2);
4272 if (t == STORE_FLAG_VALUE && f == 0)
4273 code = GET_CODE (op0);
4274 else if (t == 0 && f == STORE_FLAG_VALUE)
4276 enum rtx_code tmp;
4277 tmp = reversed_comparison_code (op0, NULL_RTX);
4278 if (tmp == UNKNOWN)
4279 break;
4280 code = tmp;
4282 else
4283 break;
4285 return simplify_gen_relational (code, mode, cmp_mode,
4286 XEXP (op0, 0), XEXP (op0, 1));
4289 if (cmp_mode == VOIDmode)
4290 cmp_mode = op0_mode;
4291 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4292 cmp_mode, XEXP (op0, 0),
4293 XEXP (op0, 1));
4295 /* See if any simplifications were possible. */
4296 if (temp)
4298 if (GET_CODE (temp) == CONST_INT)
4299 return temp == const0_rtx ? op2 : op1;
4300 else if (temp)
4301 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4304 break;
4306 case VEC_MERGE:
4307 gcc_assert (GET_MODE (op0) == mode);
4308 gcc_assert (GET_MODE (op1) == mode);
4309 gcc_assert (VECTOR_MODE_P (mode));
4310 op2 = avoid_constant_pool_reference (op2);
4311 if (GET_CODE (op2) == CONST_INT)
4313 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4314 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4315 int mask = (1 << n_elts) - 1;
4317 if (!(INTVAL (op2) & mask))
4318 return op1;
4319 if ((INTVAL (op2) & mask) == mask)
4320 return op0;
4322 op0 = avoid_constant_pool_reference (op0);
4323 op1 = avoid_constant_pool_reference (op1);
4324 if (GET_CODE (op0) == CONST_VECTOR
4325 && GET_CODE (op1) == CONST_VECTOR)
4327 rtvec v = rtvec_alloc (n_elts);
4328 unsigned int i;
4330 for (i = 0; i < n_elts; i++)
4331 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4332 ? CONST_VECTOR_ELT (op0, i)
4333 : CONST_VECTOR_ELT (op1, i));
4334 return gen_rtx_CONST_VECTOR (mode, v);
4337 break;
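/* Small worked example: in V4SImode, n_elts is 4 and mask is 0xf, so
(vec_merge:V4SI A B (const_int 5)) takes elements 0 and 2 from A and
elements 1 and 3 from B, while a selector of 0 returns B outright and
a selector of 0xf returns A. */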
4339 default:
4340 gcc_unreachable ();
4343 return 0;
4346 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4347 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4349 Works by unpacking OP into a collection of 8-bit values
4350 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4351 and then repacking them again for OUTERMODE. */
4353 static rtx
4354 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4355 enum machine_mode innermode, unsigned int byte)
4357 /* We support up to 512-bit values (for V8DFmode). */
4358 enum {
4359 max_bitsize = 512,
4360 value_bit = 8,
4361 value_mask = (1 << value_bit) - 1
4363 unsigned char value[max_bitsize / value_bit];
4364 int value_start;
4365 int i;
4366 int elem;
4368 int num_elem;
4369 rtx * elems;
4370 int elem_bitsize;
4371 rtx result_s;
4372 rtvec result_v = NULL;
4373 enum mode_class outer_class;
4374 enum machine_mode outer_submode;
4376 /* Some ports misuse CCmode. */
4377 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4378 return op;
4380 /* We have no way to represent a complex constant at the rtl level. */
4381 if (COMPLEX_MODE_P (outermode))
4382 return NULL_RTX;
4384 /* Unpack the value. */
4386 if (GET_CODE (op) == CONST_VECTOR)
4388 num_elem = CONST_VECTOR_NUNITS (op);
4389 elems = &CONST_VECTOR_ELT (op, 0);
4390 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4392 else
4394 num_elem = 1;
4395 elems = &op;
4396 elem_bitsize = max_bitsize;
4398 /* If this asserts, it is too complicated; reducing value_bit may help. */
4399 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4400 /* I don't know how to handle endianness of sub-units. */
4401 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4403 for (elem = 0; elem < num_elem; elem++)
4405 unsigned char * vp;
4406 rtx el = elems[elem];
4408 /* Vectors are kept in target memory order. (This is probably
4409 a mistake.) */
4411 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4412 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4413 / BITS_PER_UNIT);
4414 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4415 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4416 unsigned bytele = (subword_byte % UNITS_PER_WORD
4417 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4418 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4421 switch (GET_CODE (el))
4423 case CONST_INT:
4424 for (i = 0;
4425 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4426 i += value_bit)
4427 *vp++ = INTVAL (el) >> i;
4428 /* CONST_INTs are always logically sign-extended. */
4429 for (; i < elem_bitsize; i += value_bit)
4430 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4431 break;
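/* For instance, (const_int -2) unpacks to the little-endian byte
sequence 0xfe, 0xff, 0xff, ... out to elem_bitsize, because CONST_INTs
are logically sign-extended. */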
4433 case CONST_DOUBLE:
4434 if (GET_MODE (el) == VOIDmode)
4436 /* If this triggers, someone should have generated a
4437 CONST_INT instead. */
4438 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4440 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4441 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4442 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4444 *vp++
4445 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4446 i += value_bit;
4448 /* It shouldn't matter what's done here, so fill it with
4449 zero. */
4450 for (; i < elem_bitsize; i += value_bit)
4451 *vp++ = 0;
4453 else
4455 long tmp[max_bitsize / 32];
4456 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4458 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4459 gcc_assert (bitsize <= elem_bitsize);
4460 gcc_assert (bitsize % value_bit == 0);
4462 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4463 GET_MODE (el));
4465 /* real_to_target produces its result in words affected by
4466 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4467 and use WORDS_BIG_ENDIAN instead; see the documentation
4468 of SUBREG in rtl.texi. */
4469 for (i = 0; i < bitsize; i += value_bit)
4471 int ibase;
4472 if (WORDS_BIG_ENDIAN)
4473 ibase = bitsize - 1 - i;
4474 else
4475 ibase = i;
4476 *vp++ = tmp[ibase / 32] >> i % 32;
4479 /* It shouldn't matter what's done here, so fill it with
4480 zero. */
4481 for (; i < elem_bitsize; i += value_bit)
4482 *vp++ = 0;
4484 break;
4486 default:
4487 gcc_unreachable ();
4491 /* Now, pick the right byte to start with. */
4492 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4493 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4494 will already have offset 0. */
4495 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4497 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4498 - byte);
4499 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4500 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4501 byte = (subword_byte % UNITS_PER_WORD
4502 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
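/* Example of the renumbering: taking the SImode lowpart of a DImode
value on a target where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN
hold arrives here with BYTE == 4; ibyte is 8 - 4 - 4 == 0, so BYTE is
remapped to 0, the least significant position in the little-endian
value array built above. */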
4505 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4506 so if it's become negative it will instead be very large.) */
4507 gcc_assert (byte < GET_MODE_SIZE (innermode));
4509 /* Convert from bytes to chunks of size value_bit. */
4510 value_start = byte * (BITS_PER_UNIT / value_bit);
4512 /* Re-pack the value. */
4514 if (VECTOR_MODE_P (outermode))
4516 num_elem = GET_MODE_NUNITS (outermode);
4517 result_v = rtvec_alloc (num_elem);
4518 elems = &RTVEC_ELT (result_v, 0);
4519 outer_submode = GET_MODE_INNER (outermode);
4521 else
4523 num_elem = 1;
4524 elems = &result_s;
4525 outer_submode = outermode;
4528 outer_class = GET_MODE_CLASS (outer_submode);
4529 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4531 gcc_assert (elem_bitsize % value_bit == 0);
4532 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4534 for (elem = 0; elem < num_elem; elem++)
4536 unsigned char *vp;
4538 /* Vectors are stored in target memory order. (This is probably
4539 a mistake.) */
4541 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4542 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4543 / BITS_PER_UNIT);
4544 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4545 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4546 unsigned bytele = (subword_byte % UNITS_PER_WORD
4547 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4548 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4551 switch (outer_class)
4553 case MODE_INT:
4554 case MODE_PARTIAL_INT:
4556 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4558 for (i = 0;
4559 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4560 i += value_bit)
4561 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4562 for (; i < elem_bitsize; i += value_bit)
4563 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4564 << (i - HOST_BITS_PER_WIDE_INT));
4566 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4567 know why. */
4568 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4569 elems[elem] = gen_int_mode (lo, outer_submode);
4570 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4571 elems[elem] = immed_double_const (lo, hi, outer_submode);
4572 else
4573 return NULL_RTX;
4575 break;
4577 case MODE_FLOAT:
4578 case MODE_DECIMAL_FLOAT:
4580 REAL_VALUE_TYPE r;
4581 long tmp[max_bitsize / 32];
4583 /* real_from_target wants its input in words affected by
4584 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4585 and use WORDS_BIG_ENDIAN instead; see the documentation
4586 of SUBREG in rtl.texi. */
4587 for (i = 0; i < max_bitsize / 32; i++)
4588 tmp[i] = 0;
4589 for (i = 0; i < elem_bitsize; i += value_bit)
4591 int ibase;
4592 if (WORDS_BIG_ENDIAN)
4593 ibase = elem_bitsize - 1 - i;
4594 else
4595 ibase = i;
4596 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4599 real_from_target (&r, tmp, outer_submode);
4600 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4602 break;
4604 default:
4605 gcc_unreachable ();
4608 if (VECTOR_MODE_P (outermode))
4609 return gen_rtx_CONST_VECTOR (outermode, result_v);
4610 else
4611 return result_s;
4614 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4615 Return 0 if no simplifications are possible. */
4617 simplify_subreg (enum machine_mode outermode, rtx op,
4618 enum machine_mode innermode, unsigned int byte)
4620 /* Little bit of sanity checking. */
4621 gcc_assert (innermode != VOIDmode);
4622 gcc_assert (outermode != VOIDmode);
4623 gcc_assert (innermode != BLKmode);
4624 gcc_assert (outermode != BLKmode);
4626 gcc_assert (GET_MODE (op) == innermode
4627 || GET_MODE (op) == VOIDmode);
4629 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4630 gcc_assert (byte < GET_MODE_SIZE (innermode));
4632 if (outermode == innermode && !byte)
4633 return op;
4635 if (GET_CODE (op) == CONST_INT
4636 || GET_CODE (op) == CONST_DOUBLE
4637 || GET_CODE (op) == CONST_VECTOR)
4638 return simplify_immed_subreg (outermode, op, innermode, byte);
4640 /* Changing mode twice with SUBREG => just change it once,
4641 or not at all if changing back to the starting mode of OP. */
4642 if (GET_CODE (op) == SUBREG)
4644 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4645 int final_offset = byte + SUBREG_BYTE (op);
4646 rtx newx;
4648 if (outermode == innermostmode
4649 && byte == 0 && SUBREG_BYTE (op) == 0)
4650 return SUBREG_REG (op);
4652 /* The SUBREG_BYTE represents the offset, as if the value were stored
4653 in memory. The irritating exception is a paradoxical subreg, where
4654 we define SUBREG_BYTE to be 0; on big endian machines, this
4655 value would otherwise be negative. For a moment, undo this exception. */
4656 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4658 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4659 if (WORDS_BIG_ENDIAN)
4660 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4661 if (BYTES_BIG_ENDIAN)
4662 final_offset += difference % UNITS_PER_WORD;
4664 if (SUBREG_BYTE (op) == 0
4665 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4667 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4668 if (WORDS_BIG_ENDIAN)
4669 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4670 if (BYTES_BIG_ENDIAN)
4671 final_offset += difference % UNITS_PER_WORD;
4674 /* See whether resulting subreg will be paradoxical. */
4675 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4677 /* In nonparadoxical subregs we can't handle negative offsets. */
4678 if (final_offset < 0)
4679 return NULL_RTX;
4680 /* Bail out in case resulting subreg would be incorrect. */
4681 if (final_offset % GET_MODE_SIZE (outermode)
4682 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4683 return NULL_RTX;
4685 else
4687 int offset = 0;
4688 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4690 /* In a paradoxical subreg, see if we are still looking at the lower part.
4691 If so, our SUBREG_BYTE will be 0. */
4692 if (WORDS_BIG_ENDIAN)
4693 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4694 if (BYTES_BIG_ENDIAN)
4695 offset += difference % UNITS_PER_WORD;
4696 if (offset == final_offset)
4697 final_offset = 0;
4698 else
4699 return NULL_RTX;
4702 /* Recurse for further possible simplifications. */
4703 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4704 final_offset);
4705 if (newx)
4706 return newx;
4707 if (validate_subreg (outermode, innermostmode,
4708 SUBREG_REG (op), final_offset))
4709 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4710 return NULL_RTX;
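/* For example, (subreg:QI (subreg:HI (reg:SI r) 0) 0) on a
little-endian target has final_offset 0 + 0 == 0 and collapses to the
single (subreg:QI (reg:SI r) 0). */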
4713 /* Merge implicit and explicit truncations. */
4715 if (GET_CODE (op) == TRUNCATE
4716 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4717 && subreg_lowpart_offset (outermode, innermode) == byte)
4718 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4719 GET_MODE (XEXP (op, 0)));
4721 /* SUBREG of a hard register => just change the register number
4722 and/or mode. If the hard register is not valid in that mode,
4723 suppress this simplification. If the hard register is the stack,
4724 frame, or argument pointer, leave this as a SUBREG. */
4726 if (REG_P (op)
4727 && REGNO (op) < FIRST_PSEUDO_REGISTER
4728 #ifdef CANNOT_CHANGE_MODE_CLASS
4729 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4730 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4731 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4732 #endif
4733 && ((reload_completed && !frame_pointer_needed)
4734 || (REGNO (op) != FRAME_POINTER_REGNUM
4735 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4736 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4737 #endif
4739 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4740 && REGNO (op) != ARG_POINTER_REGNUM
4741 #endif
4742 && REGNO (op) != STACK_POINTER_REGNUM
4743 && subreg_offset_representable_p (REGNO (op), innermode,
4744 byte, outermode))
4746 unsigned int regno = REGNO (op);
4747 unsigned int final_regno
4748 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4750 /* ??? We do allow it if the current REG is not valid for
4751 its mode. This is a kludge to work around how float/complex
4752 arguments are passed on 32-bit SPARC and should be fixed. */
4753 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4754 || ! HARD_REGNO_MODE_OK (regno, innermode))
4756 rtx x;
4757 int final_offset = byte;
4759 /* Adjust offset for paradoxical subregs. */
4760 if (byte == 0
4761 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4763 int difference = (GET_MODE_SIZE (innermode)
4764 - GET_MODE_SIZE (outermode));
4765 if (WORDS_BIG_ENDIAN)
4766 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4767 if (BYTES_BIG_ENDIAN)
4768 final_offset += difference % UNITS_PER_WORD;
4771 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4773 /* Propagate the original regno. We don't have any way to specify
4774 an offset inside the original regno, so do so only for the lowpart.
4775 The information is used only by alias analysis, which cannot
4776 grok a partial register anyway. */
4778 if (subreg_lowpart_offset (outermode, innermode) == byte)
4779 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4780 return x;
4784 /* If we have a SUBREG of a register that we are replacing and we are
4785 replacing it with a MEM, make a new MEM and try replacing the
4786 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4787 or if we would be widening it. */
4789 if (MEM_P (op)
4790 && ! mode_dependent_address_p (XEXP (op, 0))
4791 /* Allow splitting of volatile memory references in case we don't
4792 have an instruction to move the whole thing. */
4793 && (! MEM_VOLATILE_P (op)
4794 || ! have_insn_for (SET, innermode))
4795 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4796 return adjust_address_nv (op, outermode, byte);
4798 /* Handle complex values represented as CONCAT
4799 of real and imaginary part. */
4800 if (GET_CODE (op) == CONCAT)
4802 unsigned int part_size, final_offset;
4803 rtx part, res;
4805 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4806 if (byte < part_size)
4808 part = XEXP (op, 0);
4809 final_offset = byte;
4811 else
4813 part = XEXP (op, 1);
4814 final_offset = byte - part_size;
4817 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4818 return NULL_RTX;
4820 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4821 if (res)
4822 return res;
4823 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4824 return gen_rtx_SUBREG (outermode, part, final_offset);
4825 return NULL_RTX;
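/* For a complex value such as (concat:SC (reg:SF re) (reg:SF im)),
part_size is 4, so a request for byte 4 in SFmode selects the
imaginary part with final_offset 0 and simply returns (reg:SF im). */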
4828 /* Optimize SUBREG truncations of zero and sign extended values. */
4829 if ((GET_CODE (op) == ZERO_EXTEND
4830 || GET_CODE (op) == SIGN_EXTEND)
4831 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4833 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4835 /* If we're requesting the lowpart of a zero or sign extension,
4836 there are three possibilities. If the outermode is the same
4837 as the origmode, we can omit both the extension and the subreg.
4838 If the outermode is not larger than the origmode, we can apply
4839 the truncation without the extension. Finally, if the outermode
4840 is larger than the origmode, but both are integer modes, we
4841 can just extend to the appropriate mode. */
4842 if (bitpos == 0)
4844 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4845 if (outermode == origmode)
4846 return XEXP (op, 0);
4847 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4848 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4849 subreg_lowpart_offset (outermode,
4850 origmode));
4851 if (SCALAR_INT_MODE_P (outermode))
4852 return simplify_gen_unary (GET_CODE (op), outermode,
4853 XEXP (op, 0), origmode);
4856 /* A SUBREG resulting from a zero extension may fold to zero if
4857 it extracts bits higher than the ZERO_EXTEND's source bits. */
4858 if (GET_CODE (op) == ZERO_EXTEND
4859 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4860 return CONST0_RTX (outermode);
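/* Two concrete cases of the above, assuming a little-endian target:
(subreg:QI (zero_extend:SI (reg:QI x)) 0) has bitpos 0 and
outermode == origmode, so it collapses to (reg:QI x); whereas
(subreg:SI (zero_extend:DI (reg:SI x)) 4) selects bits 32..63, which
are all zero, and folds to (const_int 0). */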
4863 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4864 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4865 the outer subreg is effectively a truncation to the original mode. */
4866 if ((GET_CODE (op) == LSHIFTRT
4867 || GET_CODE (op) == ASHIFTRT)
4868 && SCALAR_INT_MODE_P (outermode)
4869 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4870 to avoid the possibility that an outer LSHIFTRT shifts by more
4871 than the sign extension's sign_bit_copies and introduces zeros
4872 into the high bits of the result. */
4873 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4874 && GET_CODE (XEXP (op, 1)) == CONST_INT
4875 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4876 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4877 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4878 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4879 return simplify_gen_binary (ASHIFTRT, outermode,
4880 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4882 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4883 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4884 the outer subreg is effectively a truncation to the original mode. */
4885 if ((GET_CODE (op) == LSHIFTRT
4886 || GET_CODE (op) == ASHIFTRT)
4887 && SCALAR_INT_MODE_P (outermode)
4888 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4889 && GET_CODE (XEXP (op, 1)) == CONST_INT
4890 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4891 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4892 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4893 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4894 return simplify_gen_binary (LSHIFTRT, outermode,
4895 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4897 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4898 into (ashift:QI (x:QI) C), where C is a suitable small constant and
4899 the outer subreg is effectively a truncation to the original mode. */
4900 if (GET_CODE (op) == ASHIFT
4901 && SCALAR_INT_MODE_P (outermode)
4902 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4903 && GET_CODE (XEXP (op, 1)) == CONST_INT
4904 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4905 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4906 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4907 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4908 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4909 return simplify_gen_binary (ASHIFT, outermode,
4910 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
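/* As an example of the first pattern above, on a little-endian target
(subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
becomes (ashiftrt:QI (reg:QI x) (const_int 2)): the subreg keeps only
the low 8 bits, so shifting the sign-extended copies right is the same
as an arithmetic shift of the original QImode value. */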
4912 return NULL_RTX;
4915 /* Make a SUBREG operation or equivalent if it folds. */
4918 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4919 enum machine_mode innermode, unsigned int byte)
4921 rtx newx;
4923 newx = simplify_subreg (outermode, op, innermode, byte);
4924 if (newx)
4925 return newx;
4927 if (GET_CODE (op) == SUBREG
4928 || GET_CODE (op) == CONCAT
4929 || GET_MODE (op) == VOIDmode)
4930 return NULL_RTX;
4932 if (validate_subreg (outermode, innermode, op, byte))
4933 return gen_rtx_SUBREG (outermode, op, byte);
4935 return NULL_RTX;
4938 /* Simplify X, an rtx expression.
4940 Return the simplified expression or NULL if no simplifications
4941 were possible.
4943 This is the preferred entry point into the simplification routines;
4944 however, we still allow passes to call the more specific routines.
4946 Right now GCC has three (yes, three) major bodies of RTL simplification
4947 code that need to be unified.
4949 1. fold_rtx in cse.c. This code uses various CSE specific
4950 information to aid in RTL simplification.
4952 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4953 it uses combine specific information to aid in RTL
4954 simplification.
4956 3. The routines in this file.
4959 Long term we want to only have one body of simplification code; to
4960 get to that state I recommend the following steps:
4962 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4963 that do not depend on pass-specific state into these routines.
4965 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4966 use this routine whenever possible.
4968 3. Allow for pass dependent state to be provided to these
4969 routines and add simplifications based on the pass dependent
4970 state. Remove code from cse.c & combine.c that becomes
4971 redundant/dead.
4973 It will take time, but ultimately the compiler will be easier to
4974 maintain and improve. It's totally silly that when we add a
4975 simplification it needs to be added to 4 places (3 for RTL
4976 simplification and 1 for tree simplification). */
4979 simplify_rtx (rtx x)
4981 enum rtx_code code = GET_CODE (x);
4982 enum machine_mode mode = GET_MODE (x);
4984 switch (GET_RTX_CLASS (code))
4986 case RTX_UNARY:
4987 return simplify_unary_operation (code, mode,
4988 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4989 case RTX_COMM_ARITH:
4990 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4991 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4993 /* Fall through.... */
4995 case RTX_BIN_ARITH:
4996 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4998 case RTX_TERNARY:
4999 case RTX_BITFIELD_OPS:
5000 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5001 XEXP (x, 0), XEXP (x, 1),
5002 XEXP (x, 2));
5004 case RTX_COMPARE:
5005 case RTX_COMM_COMPARE:
5006 return simplify_relational_operation (code, mode,
5007 ((GET_MODE (XEXP (x, 0))
5008 != VOIDmode)
5009 ? GET_MODE (XEXP (x, 0))
5010 : GET_MODE (XEXP (x, 1))),
5011 XEXP (x, 0),
5012 XEXP (x, 1));
5014 case RTX_EXTRA:
5015 if (code == SUBREG)
5016 return simplify_subreg (mode, SUBREG_REG (x),
5017 GET_MODE (SUBREG_REG (x)),
5018 SUBREG_BYTE (x));
5019 break;
5021 case RTX_OBJ:
5022 if (code == LO_SUM)
5024 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5025 if (GET_CODE (XEXP (x, 0)) == HIGH
5026 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5027 return XEXP (x, 1);
5029 break;
5031 default:
5032 break;
5034 return NULL;