gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "flags.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "ggc.h"
42 #include "target.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
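/* For example, assuming a 32-bit HOST_WIDE_INT, the pair
   (0xffffffff, HWI_SIGN_EXTEND (0xffffffff)) represents the 64-bit value -1,
   since the low word looks negative and is extended with all-one bits,
   while (1, HWI_SIGN_EXTEND (1)) is simply 1.  */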
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
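/* E.g. in SImode the most negative value 0x80000000 has no positive
   counterpart; gen_int_mode truncates the negated value back into SImode,
   so the result is again the bit pattern 0x80000000 rather than an
   out-of-range constant.  */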
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
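/* E.g. for SImode this recognizes the constant 0x80000000, and for DImode
   the constant 0x8000000000000000 (a CONST_DOUBLE with a zero low word on
   hosts where it does not fit in a HOST_WIDE_INT).  */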
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
134 avoid_constant_pool_reference (rtx x)
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
142 case MEM:
143 break;
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
151 REAL_VALUE_TYPE d;
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
156 return x;
158 default:
159 return x;
162 if (GET_MODE (x) == BLKmode)
163 return x;
165 addr = XEXP (x, 0);
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
197 return tem;
199 else
200 return c;
203 return x;
206 /* Return true if X is a MEM referencing the constant pool. */
208 bool
209 constant_pool_reference_p (rtx x)
211 return avoid_constant_pool_reference (x) != x;
214 /* Make a unary operation by first seeing if it folds and otherwise making
215 the specified operation. */
218 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
219 enum machine_mode op_mode)
221 rtx tem;
223 /* If this simplifies, use it. */
224 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
225 return tem;
227 return gen_rtx_fmt_e (code, mode, op);
230 /* Likewise for ternary operations. */
233 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
234 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
236 rtx tem;
238 /* If this simplifies, use it. */
239 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
240 op0, op1, op2)))
241 return tem;
243 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
246 /* Likewise, for relational operations.
247 CMP_MODE specifies the mode the comparison is done in. */
250 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
251 enum machine_mode cmp_mode, rtx op0, rtx op1)
253 rtx tem;
255 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
256 op0, op1)))
257 return tem;
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
266 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
271 rtx op0, op1, op2;
273 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
277 if (x == old_rtx)
278 return new_rtx;
280 switch (GET_RTX_CLASS (code))
282 case RTX_UNARY:
283 op0 = XEXP (x, 0);
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0))
287 return x;
288 return simplify_gen_unary (code, mode, op0, op_mode);
290 case RTX_BIN_ARITH:
291 case RTX_COMM_ARITH:
292 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
295 return x;
296 return simplify_gen_binary (code, mode, op0, op1);
298 case RTX_COMPARE:
299 case RTX_COMM_COMPARE:
300 op0 = XEXP (x, 0);
301 op1 = XEXP (x, 1);
302 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
303 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
304 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
305 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
306 return x;
307 return simplify_gen_relational (code, mode, op_mode, op0, op1);
309 case RTX_TERNARY:
310 case RTX_BITFIELD_OPS:
311 op0 = XEXP (x, 0);
312 op_mode = GET_MODE (op0);
313 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
314 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
315 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
316 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
317 return x;
318 if (op_mode == VOIDmode)
319 op_mode = GET_MODE (op0);
320 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
322 case RTX_EXTRA:
323 /* The only case we try to handle is a SUBREG. */
324 if (code == SUBREG)
326 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
327 if (op0 == SUBREG_REG (x))
328 return x;
329 op0 = simplify_gen_subreg (GET_MODE (x), op0,
330 GET_MODE (SUBREG_REG (x)),
331 SUBREG_BYTE (x));
332 return op0 ? op0 : x;
334 break;
336 case RTX_OBJ:
337 if (code == MEM)
339 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
340 if (op0 == XEXP (x, 0))
341 return x;
342 return replace_equiv_address_nv (x, op0);
344 else if (code == LO_SUM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
347 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
349 /* (lo_sum (high x) x) -> x */
350 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
351 return op1;
353 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
354 return x;
355 return gen_rtx_LO_SUM (mode, op0, op1);
357 else if (code == REG)
359 if (rtx_equal_p (x, old_rtx))
360 return new_rtx;
362 break;
364 default:
365 break;
367 return x;
370 /* Try to simplify a unary operation CODE whose output mode is to be
371 MODE with input operand OP whose mode was originally OP_MODE.
372 Return zero if no simplification can be made. */
374 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
375 rtx op, enum machine_mode op_mode)
377 rtx trueop, tem;
379 if (GET_CODE (op) == CONST)
380 op = XEXP (op, 0);
382 trueop = avoid_constant_pool_reference (op);
384 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
385 if (tem)
386 return tem;
388 return simplify_unary_operation_1 (code, mode, op);
391 /* Perform some simplifications we can do even if the operands
392 aren't constant. */
393 static rtx
394 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
396 enum rtx_code reversed;
397 rtx temp;
399 switch (code)
401 case NOT:
402 /* (not (not X)) == X. */
403 if (GET_CODE (op) == NOT)
404 return XEXP (op, 0);
406 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
407 comparison is all ones. */
408 if (COMPARISON_P (op)
409 && (mode == BImode || STORE_FLAG_VALUE == -1)
410 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
411 return simplify_gen_relational (reversed, mode, VOIDmode,
412 XEXP (op, 0), XEXP (op, 1));
414 /* (not (plus X -1)) can become (neg X). */
415 if (GET_CODE (op) == PLUS
416 && XEXP (op, 1) == constm1_rtx)
417 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
419 /* Similarly, (not (neg X)) is (plus X -1). */
420 if (GET_CODE (op) == NEG)
421 return plus_constant (XEXP (op, 0), -1);
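/* Both identities follow from two's-complement arithmetic: (neg X) is
   (plus (not X) 1), hence (not (plus X -1)) is (neg X) and (not (neg X))
   is (plus X -1).  */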
423 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
424 if (GET_CODE (op) == XOR
425 && GET_CODE (XEXP (op, 1)) == CONST_INT
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
431 if (GET_CODE (op) == PLUS
432 && GET_CODE (XEXP (op, 1)) == CONST_INT
433 && mode_signbit_p (mode, XEXP (op, 1))
434 && (temp = simplify_unary_operation (NOT, mode,
435 XEXP (op, 1), mode)) != 0)
436 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
439 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
440 operands other than 1, but that is not valid. We could do a
441 similar simplification for (not (lshiftrt C X)) where C is
442 just the sign bit, but this doesn't seem common enough to
443 bother with. */
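/* Illustration in QImode with X == 3: (ashift 1 3) is 0b00001000, whose
   complement is 0b11110111; rotating (not 1) == 0b11111110 left by 3
   yields the same 0b11110111.  */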
444 if (GET_CODE (op) == ASHIFT
445 && XEXP (op, 0) == const1_rtx)
447 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
448 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
451 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
452 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
453 so we can perform the above simplification. */
455 if (STORE_FLAG_VALUE == -1
456 && GET_CODE (op) == ASHIFTRT
457 && GET_CODE (XEXP (op, 1)) == CONST_INT
458 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
459 return simplify_gen_relational (GE, mode, VOIDmode,
460 XEXP (op, 0), const0_rtx);
463 if (GET_CODE (op) == SUBREG
464 && subreg_lowpart_p (op)
465 && (GET_MODE_SIZE (GET_MODE (op))
466 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
467 && GET_CODE (SUBREG_REG (op)) == ASHIFT
468 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
470 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
471 rtx x;
473 x = gen_rtx_ROTATE (inner_mode,
474 simplify_gen_unary (NOT, inner_mode, const1_rtx,
475 inner_mode),
476 XEXP (SUBREG_REG (op), 1));
477 return rtl_hooks.gen_lowpart_no_emit (mode, x);
480 /* Apply De Morgan's laws to reduce number of patterns for machines
481 with negating logical insns (and-not, nand, etc.). If result has
482 only one NOT, put it first, since that is how the patterns are
483 coded. */
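/* E.g. (not (ior A B)) becomes (and (not A) (not B)), and (not (and A B))
   becomes (ior (not A) (not B)); the swap below puts a lone surviving NOT
   first.  */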
485 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
487 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
488 enum machine_mode op_mode;
490 op_mode = GET_MODE (in1);
491 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
493 op_mode = GET_MODE (in2);
494 if (op_mode == VOIDmode)
495 op_mode = mode;
496 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
498 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
500 rtx tem = in2;
501 in2 = in1; in1 = tem;
504 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
505 mode, in1, in2);
507 break;
509 case NEG:
510 /* (neg (neg X)) == X. */
511 if (GET_CODE (op) == NEG)
512 return XEXP (op, 0);
514 /* (neg (plus X 1)) can become (not X). */
515 if (GET_CODE (op) == PLUS
516 && XEXP (op, 1) == const1_rtx)
517 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
519 /* Similarly, (neg (not X)) is (plus X 1). */
520 if (GET_CODE (op) == NOT)
521 return plus_constant (XEXP (op, 0), 1);
523 /* (neg (minus X Y)) can become (minus Y X). This transformation
524 isn't safe for modes with signed zeros, since if X and Y are
525 both +0, (minus Y X) is the same as (minus X Y). If the
526 rounding mode is towards +infinity (or -infinity) then the two
527 expressions will be rounded differently. */
528 if (GET_CODE (op) == MINUS
529 && !HONOR_SIGNED_ZEROS (mode)
530 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
531 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
533 if (GET_CODE (op) == PLUS
534 && !HONOR_SIGNED_ZEROS (mode)
535 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
537 /* (neg (plus A C)) is simplified to (minus -C A). */
538 if (GET_CODE (XEXP (op, 1)) == CONST_INT
539 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
541 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
542 if (temp)
543 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
546 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
551 /* (neg (mult A B)) becomes (mult (neg A) B).
552 This works even for floating-point values. */
553 if (GET_CODE (op) == MULT
554 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
556 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
560 /* NEG commutes with ASHIFT since it is multiplication. Only do
561 this if we can then eliminate the NEG (e.g., if the operand
562 is a constant). */
563 if (GET_CODE (op) == ASHIFT)
565 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
566 if (temp)
567 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
570 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == ASHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (LSHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
578 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
579 C is equal to the width of MODE minus 1. */
580 if (GET_CODE (op) == LSHIFTRT
581 && GET_CODE (XEXP (op, 1)) == CONST_INT
582 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
583 return simplify_gen_binary (ASHIFTRT, mode,
584 XEXP (op, 0), XEXP (op, 1));
586 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
587 if (GET_CODE (op) == XOR
588 && XEXP (op, 1) == const1_rtx
589 && nonzero_bits (XEXP (op, 0), mode) == 1)
590 return plus_constant (XEXP (op, 0), -1);
592 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
593 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
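/* With STORE_FLAG_VALUE == 1 the comparison yields 0 or 1, so its negation
   is 0 or -1, which is X arithmetically shifted right by the mode width
   minus 1; with STORE_FLAG_VALUE == -1 it yields 0 or -1, whose negation
   is 0 or 1, i.e. the logical shift that extracts just the sign bit.  */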
594 if (GET_CODE (op) == LT
595 && XEXP (op, 1) == const0_rtx)
597 enum machine_mode inner = GET_MODE (XEXP (op, 0));
598 int isize = GET_MODE_BITSIZE (inner);
599 if (STORE_FLAG_VALUE == 1)
601 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
602 GEN_INT (isize - 1));
603 if (mode == inner)
604 return temp;
605 if (GET_MODE_BITSIZE (mode) > isize)
606 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
607 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
609 else if (STORE_FLAG_VALUE == -1)
611 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
612 GEN_INT (isize - 1));
613 if (mode == inner)
614 return temp;
615 if (GET_MODE_BITSIZE (mode) > isize)
616 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
617 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
620 break;
622 case TRUNCATE:
623 /* We can't handle truncation to a partial integer mode here
624 because we don't know the real bitsize of the partial
625 integer mode. */
626 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
627 break;
629 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
630 if ((GET_CODE (op) == SIGN_EXTEND
631 || GET_CODE (op) == ZERO_EXTEND)
632 && GET_MODE (XEXP (op, 0)) == mode)
633 return XEXP (op, 0);
635 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
636 (OP:SI foo:SI) if OP is NEG or ABS. */
637 if ((GET_CODE (op) == ABS
638 || GET_CODE (op) == NEG)
639 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
640 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
641 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
642 return simplify_gen_unary (GET_CODE (op), mode,
643 XEXP (XEXP (op, 0), 0), mode);
645 /* (truncate:A (subreg:B (truncate:C X) 0)) is
646 (truncate:A X). */
647 if (GET_CODE (op) == SUBREG
648 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
649 && subreg_lowpart_p (op))
650 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
651 GET_MODE (XEXP (SUBREG_REG (op), 0)));
653 /* If we know that the value is already truncated, we can
654 replace the TRUNCATE with a SUBREG. Note that this is also
655 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
656 modes; we just have to apply a different definition for
657 truncation. But don't do this for an (LSHIFTRT (MULT ...))
658 since this will cause problems with the umulXi3_highpart
659 patterns. */
660 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
661 GET_MODE_BITSIZE (GET_MODE (op)))
662 ? (num_sign_bit_copies (op, GET_MODE (op))
663 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
664 - GET_MODE_BITSIZE (mode)))
665 : truncated_to_mode (mode, op))
666 && ! (GET_CODE (op) == LSHIFTRT
667 && GET_CODE (XEXP (op, 0)) == MULT))
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 /* A truncate of a comparison can be replaced with a subreg if
671 STORE_FLAG_VALUE permits. This is like the previous test,
672 but it works even if the comparison is done in a mode larger
673 than HOST_BITS_PER_WIDE_INT. */
674 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
675 && COMPARISON_P (op)
676 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
677 return rtl_hooks.gen_lowpart_no_emit (mode, op);
678 break;
680 case FLOAT_TRUNCATE:
681 if (DECIMAL_FLOAT_MODE_P (mode))
682 break;
684 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
685 if (GET_CODE (op) == FLOAT_EXTEND
686 && GET_MODE (XEXP (op, 0)) == mode)
687 return XEXP (op, 0);
689 /* (float_truncate:SF (float_truncate:DF foo:XF))
690 = (float_truncate:SF foo:XF).
691 This may eliminate double rounding, so it is unsafe.
693 (float_truncate:SF (float_extend:XF foo:DF))
694 = (float_truncate:SF foo:DF).
696 (float_truncate:DF (float_extend:XF foo:SF))
697 = (float_extend:DF foo:SF). */
698 if ((GET_CODE (op) == FLOAT_TRUNCATE
699 && flag_unsafe_math_optimizations)
700 || GET_CODE (op) == FLOAT_EXTEND)
701 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
702 0)))
703 > GET_MODE_SIZE (mode)
704 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
705 mode,
706 XEXP (op, 0), mode);
708 /* (float_truncate (float x)) is (float x) */
709 if (GET_CODE (op) == FLOAT
710 && (flag_unsafe_math_optimizations
711 || ((unsigned)significand_size (GET_MODE (op))
712 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
713 - num_sign_bit_copies (XEXP (op, 0),
714 GET_MODE (XEXP (op, 0)))))))
715 return simplify_gen_unary (FLOAT, mode,
716 XEXP (op, 0),
717 GET_MODE (XEXP (op, 0)));
719 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
720 (OP:SF foo:SF) if OP is NEG or ABS. */
721 if ((GET_CODE (op) == ABS
722 || GET_CODE (op) == NEG)
723 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
725 return simplify_gen_unary (GET_CODE (op), mode,
726 XEXP (XEXP (op, 0), 0), mode);
728 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
729 is (float_truncate:SF x). */
730 if (GET_CODE (op) == SUBREG
731 && subreg_lowpart_p (op)
732 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
733 return SUBREG_REG (op);
734 break;
736 case FLOAT_EXTEND:
737 if (DECIMAL_FLOAT_MODE_P (mode))
738 break;
740 /* (float_extend (float_extend x)) is (float_extend x)
742 (float_extend (float x)) is (float x) assuming that double
743 rounding can't happen. */
745 if (GET_CODE (op) == FLOAT_EXTEND
746 || (GET_CODE (op) == FLOAT
747 && ((unsigned)significand_size (GET_MODE (op))
748 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
749 - num_sign_bit_copies (XEXP (op, 0),
750 GET_MODE (XEXP (op, 0)))))))
751 return simplify_gen_unary (GET_CODE (op), mode,
752 XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
755 break;
757 case ABS:
758 /* (abs (neg <foo>)) -> (abs <foo>) */
759 if (GET_CODE (op) == NEG)
760 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
761 GET_MODE (XEXP (op, 0)));
763 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
764 do nothing. */
765 if (GET_MODE (op) == VOIDmode)
766 break;
768 /* If operand is something known to be positive, ignore the ABS. */
769 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
770 || ((GET_MODE_BITSIZE (GET_MODE (op))
771 <= HOST_BITS_PER_WIDE_INT)
772 && ((nonzero_bits (op, GET_MODE (op))
773 & ((HOST_WIDE_INT) 1
774 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
775 == 0)))
776 return op;
778 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
779 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
780 return gen_rtx_NEG (mode, op);
782 break;
784 case FFS:
785 /* (ffs (*_extend <X>)) = (ffs <X>) */
786 if (GET_CODE (op) == SIGN_EXTEND
787 || GET_CODE (op) == ZERO_EXTEND)
788 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
789 GET_MODE (XEXP (op, 0)));
790 break;
792 case POPCOUNT:
793 switch (GET_CODE (op))
795 case BSWAP:
796 case ZERO_EXTEND:
797 /* (popcount (zero_extend <X>)) = (popcount <X>) */
798 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
799 GET_MODE (XEXP (op, 0)));
801 case ROTATE:
802 case ROTATERT:
803 /* Rotations don't affect popcount. */
804 if (!side_effects_p (XEXP (op, 1)))
805 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
806 GET_MODE (XEXP (op, 0)));
807 break;
809 default:
810 break;
812 break;
814 case PARITY:
815 switch (GET_CODE (op))
817 case NOT:
818 case BSWAP:
819 case ZERO_EXTEND:
820 case SIGN_EXTEND:
821 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 case ROTATE:
825 case ROTATERT:
826 /* Rotations don't affect parity. */
827 if (!side_effects_p (XEXP (op, 1)))
828 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
829 GET_MODE (XEXP (op, 0)));
830 break;
832 default:
833 break;
835 break;
837 case BSWAP:
838 /* (bswap (bswap x)) -> x. */
839 if (GET_CODE (op) == BSWAP)
840 return XEXP (op, 0);
841 break;
843 case FLOAT:
844 /* (float (sign_extend <X>)) = (float <X>). */
845 if (GET_CODE (op) == SIGN_EXTEND)
846 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
847 GET_MODE (XEXP (op, 0)));
848 break;
850 case SIGN_EXTEND:
851 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
852 becomes just the MINUS if its mode is MODE. This allows
853 folding switch statements on machines using casesi (such as
854 the VAX). */
855 if (GET_CODE (op) == TRUNCATE
856 && GET_MODE (XEXP (op, 0)) == mode
857 && GET_CODE (XEXP (op, 0)) == MINUS
858 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
859 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
860 return XEXP (op, 0);
862 /* Check for a sign extension of a subreg of a promoted
863 variable, where the promotion is sign-extended, and the
864 target mode is the same as the variable's promotion. */
865 if (GET_CODE (op) == SUBREG
866 && SUBREG_PROMOTED_VAR_P (op)
867 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
868 && GET_MODE (XEXP (op, 0)) == mode)
869 return XEXP (op, 0);
871 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
872 if (! POINTERS_EXTEND_UNSIGNED
873 && mode == Pmode && GET_MODE (op) == ptr_mode
874 && (CONSTANT_P (op)
875 || (GET_CODE (op) == SUBREG
876 && REG_P (SUBREG_REG (op))
877 && REG_POINTER (SUBREG_REG (op))
878 && GET_MODE (SUBREG_REG (op)) == Pmode)))
879 return convert_memory_address (Pmode, op);
880 #endif
881 break;
883 case ZERO_EXTEND:
884 /* Check for a zero extension of a subreg of a promoted
885 variable, where the promotion is zero-extended, and the
886 target mode is the same as the variable's promotion. */
887 if (GET_CODE (op) == SUBREG
888 && SUBREG_PROMOTED_VAR_P (op)
889 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
890 && GET_MODE (XEXP (op, 0)) == mode)
891 return XEXP (op, 0);
893 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
894 if (POINTERS_EXTEND_UNSIGNED > 0
895 && mode == Pmode && GET_MODE (op) == ptr_mode
896 && (CONSTANT_P (op)
897 || (GET_CODE (op) == SUBREG
898 && REG_P (SUBREG_REG (op))
899 && REG_POINTER (SUBREG_REG (op))
900 && GET_MODE (SUBREG_REG (op)) == Pmode)))
901 return convert_memory_address (Pmode, op);
902 #endif
903 break;
905 default:
906 break;
909 return 0;
912 /* Try to compute the value of a unary operation CODE whose output mode is to
913 be MODE with input operand OP whose mode was originally OP_MODE.
914 Return zero if the value cannot be computed. */
916 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
917 rtx op, enum machine_mode op_mode)
919 unsigned int width = GET_MODE_BITSIZE (mode);
921 if (code == VEC_DUPLICATE)
923 gcc_assert (VECTOR_MODE_P (mode));
924 if (GET_MODE (op) != VOIDmode)
926 if (!VECTOR_MODE_P (GET_MODE (op)))
927 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
928 else
929 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
930 (GET_MODE (op)));
932 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
933 || GET_CODE (op) == CONST_VECTOR)
935 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
936 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
937 rtvec v = rtvec_alloc (n_elts);
938 unsigned int i;
940 if (GET_CODE (op) != CONST_VECTOR)
941 for (i = 0; i < n_elts; i++)
942 RTVEC_ELT (v, i) = op;
943 else
945 enum machine_mode inmode = GET_MODE (op);
946 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
947 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
949 gcc_assert (in_n_elts < n_elts);
950 gcc_assert ((n_elts % in_n_elts) == 0);
951 for (i = 0; i < n_elts; i++)
952 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
954 return gen_rtx_CONST_VECTOR (mode, v);
958 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
960 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
961 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
962 enum machine_mode opmode = GET_MODE (op);
963 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
964 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
965 rtvec v = rtvec_alloc (n_elts);
966 unsigned int i;
968 gcc_assert (op_n_elts == n_elts);
969 for (i = 0; i < n_elts; i++)
971 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
972 CONST_VECTOR_ELT (op, i),
973 GET_MODE_INNER (opmode));
974 if (!x)
975 return 0;
976 RTVEC_ELT (v, i) = x;
978 return gen_rtx_CONST_VECTOR (mode, v);
981 /* The order of these tests is critical so that, for example, we don't
982 check the wrong mode (input vs. output) for a conversion operation,
983 such as FIX. At some point, this should be simplified. */
985 if (code == FLOAT && GET_MODE (op) == VOIDmode
986 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
988 HOST_WIDE_INT hv, lv;
989 REAL_VALUE_TYPE d;
991 if (GET_CODE (op) == CONST_INT)
992 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
993 else
994 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
996 REAL_VALUE_FROM_INT (d, lv, hv, mode);
997 d = real_value_truncate (mode, d);
998 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1000 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1001 && (GET_CODE (op) == CONST_DOUBLE
1002 || GET_CODE (op) == CONST_INT))
1004 HOST_WIDE_INT hv, lv;
1005 REAL_VALUE_TYPE d;
1007 if (GET_CODE (op) == CONST_INT)
1008 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1009 else
1010 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1012 if (op_mode == VOIDmode)
1014 /* We don't know how to interpret negative-looking numbers in
1015 this case, so don't try to fold those. */
1016 if (hv < 0)
1017 return 0;
1019 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1020 ;
1021 else
1022 hv = 0, lv &= GET_MODE_MASK (op_mode);
1024 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1025 d = real_value_truncate (mode, d);
1026 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1029 if (GET_CODE (op) == CONST_INT
1030 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1032 HOST_WIDE_INT arg0 = INTVAL (op);
1033 HOST_WIDE_INT val;
1035 switch (code)
1037 case NOT:
1038 val = ~ arg0;
1039 break;
1041 case NEG:
1042 val = - arg0;
1043 break;
1045 case ABS:
1046 val = (arg0 >= 0 ? arg0 : - arg0);
1047 break;
1049 case FFS:
1050 /* Don't use ffs here. Instead, get low order bit and then its
1051 number. If arg0 is zero, this will return 0, as desired. */
1052 arg0 &= GET_MODE_MASK (mode);
1053 val = exact_log2 (arg0 & (- arg0)) + 1;
1054 break;
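/* E.g. arg0 == 0b101000: arg0 & -arg0 isolates the lowest set bit 0b1000,
   exact_log2 of that is 3, so the result is 4 (bit positions are counted
   from 1).  */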
1056 case CLZ:
1057 arg0 &= GET_MODE_MASK (mode);
1058 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1059 ;
1060 else
1061 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1062 break;
1064 case CTZ:
1065 arg0 &= GET_MODE_MASK (mode);
1066 if (arg0 == 0)
1068 /* Even if the value at zero is undefined, we have to come
1069 up with some replacement. Seems good enough. */
1070 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1071 val = GET_MODE_BITSIZE (mode);
1073 else
1074 val = exact_log2 (arg0 & -arg0);
1075 break;
1077 case POPCOUNT:
1078 arg0 &= GET_MODE_MASK (mode);
1079 val = 0;
1080 while (arg0)
1081 val++, arg0 &= arg0 - 1;
1082 break;
1084 case PARITY:
1085 arg0 &= GET_MODE_MASK (mode);
1086 val = 0;
1087 while (arg0)
1088 val++, arg0 &= arg0 - 1;
1089 val &= 1;
1090 break;
1092 case BSWAP:
1094 unsigned int s;
1096 val = 0;
1097 for (s = 0; s < width; s += 8)
1099 unsigned int d = width - s - 8;
1100 unsigned HOST_WIDE_INT byte;
1101 byte = (arg0 >> s) & 0xff;
1102 val |= byte << d;
1105 break;
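/* E.g. for a 32-bit mode 0x12345678 becomes 0x78563412: the byte at bit
   position s moves to the mirror position d = width - s - 8.  */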
1107 case TRUNCATE:
1108 val = arg0;
1109 break;
1111 case ZERO_EXTEND:
1112 /* When zero-extending a CONST_INT, we need to know its
1113 original mode. */
1114 gcc_assert (op_mode != VOIDmode);
1115 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1117 /* If we were really extending the mode,
1118 we would have to distinguish between zero-extension
1119 and sign-extension. */
1120 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1121 val = arg0;
1123 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1124 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1125 else
1126 return 0;
1127 break;
1129 case SIGN_EXTEND:
1130 if (op_mode == VOIDmode)
1131 op_mode = mode;
1132 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1134 /* If we were really extending the mode,
1135 we would have to distinguish between zero-extension
1136 and sign-extension. */
1137 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1138 val = arg0;
1140 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1142 val
1143 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1144 if (val
1145 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1146 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1148 else
1149 return 0;
1150 break;
1152 case SQRT:
1153 case FLOAT_EXTEND:
1154 case FLOAT_TRUNCATE:
1155 case SS_TRUNCATE:
1156 case US_TRUNCATE:
1157 case SS_NEG:
1158 return 0;
1160 default:
1161 gcc_unreachable ();
1164 return gen_int_mode (val, mode);
1167 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1168 for a DImode operation on a CONST_INT. */
1169 else if (GET_MODE (op) == VOIDmode
1170 && width <= HOST_BITS_PER_WIDE_INT * 2
1171 && (GET_CODE (op) == CONST_DOUBLE
1172 || GET_CODE (op) == CONST_INT))
1174 unsigned HOST_WIDE_INT l1, lv;
1175 HOST_WIDE_INT h1, hv;
1177 if (GET_CODE (op) == CONST_DOUBLE)
1178 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1179 else
1180 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1182 switch (code)
1184 case NOT:
1185 lv = ~ l1;
1186 hv = ~ h1;
1187 break;
1189 case NEG:
1190 neg_double (l1, h1, &lv, &hv);
1191 break;
1193 case ABS:
1194 if (h1 < 0)
1195 neg_double (l1, h1, &lv, &hv);
1196 else
1197 lv = l1, hv = h1;
1198 break;
1200 case FFS:
1201 hv = 0;
1202 if (l1 == 0)
1204 if (h1 == 0)
1205 lv = 0;
1206 else
1207 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1209 else
1210 lv = exact_log2 (l1 & -l1) + 1;
1211 break;
1213 case CLZ:
1214 hv = 0;
1215 if (h1 != 0)
1216 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1217 - HOST_BITS_PER_WIDE_INT;
1218 else if (l1 != 0)
1219 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1220 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1221 lv = GET_MODE_BITSIZE (mode);
1222 break;
1224 case CTZ:
1225 hv = 0;
1226 if (l1 != 0)
1227 lv = exact_log2 (l1 & -l1);
1228 else if (h1 != 0)
1229 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1230 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1231 lv = GET_MODE_BITSIZE (mode);
1232 break;
1234 case POPCOUNT:
1235 hv = 0;
1236 lv = 0;
1237 while (l1)
1238 lv++, l1 &= l1 - 1;
1239 while (h1)
1240 lv++, h1 &= h1 - 1;
1241 break;
1243 case PARITY:
1244 hv = 0;
1245 lv = 0;
1246 while (l1)
1247 lv++, l1 &= l1 - 1;
1248 while (h1)
1249 lv++, h1 &= h1 - 1;
1250 lv &= 1;
1251 break;
1253 case BSWAP:
1255 unsigned int s;
1257 hv = 0;
1258 lv = 0;
1259 for (s = 0; s < width; s += 8)
1261 unsigned int d = width - s - 8;
1262 unsigned HOST_WIDE_INT byte;
1264 if (s < HOST_BITS_PER_WIDE_INT)
1265 byte = (l1 >> s) & 0xff;
1266 else
1267 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1269 if (d < HOST_BITS_PER_WIDE_INT)
1270 lv |= byte << d;
1271 else
1272 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1275 break;
1277 case TRUNCATE:
1278 /* This is just a change-of-mode, so do nothing. */
1279 lv = l1, hv = h1;
1280 break;
1282 case ZERO_EXTEND:
1283 gcc_assert (op_mode != VOIDmode);
1285 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1286 return 0;
1288 hv = 0;
1289 lv = l1 & GET_MODE_MASK (op_mode);
1290 break;
1292 case SIGN_EXTEND:
1293 if (op_mode == VOIDmode
1294 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1295 return 0;
1296 else
1298 lv = l1 & GET_MODE_MASK (op_mode);
1299 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1300 && (lv & ((HOST_WIDE_INT) 1
1301 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1302 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1304 hv = HWI_SIGN_EXTEND (lv);
1306 break;
1308 case SQRT:
1309 return 0;
1311 default:
1312 return 0;
1315 return immed_double_const (lv, hv, mode);
1318 else if (GET_CODE (op) == CONST_DOUBLE
1319 && SCALAR_FLOAT_MODE_P (mode))
1321 REAL_VALUE_TYPE d, t;
1322 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1324 switch (code)
1326 case SQRT:
1327 if (HONOR_SNANS (mode) && real_isnan (&d))
1328 return 0;
1329 real_sqrt (&t, mode, &d);
1330 d = t;
1331 break;
1332 case ABS:
1333 d = REAL_VALUE_ABS (d);
1334 break;
1335 case NEG:
1336 d = REAL_VALUE_NEGATE (d);
1337 break;
1338 case FLOAT_TRUNCATE:
1339 d = real_value_truncate (mode, d);
1340 break;
1341 case FLOAT_EXTEND:
1342 /* All this does is change the mode. */
1343 break;
1344 case FIX:
1345 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1346 break;
1347 case NOT:
1349 long tmp[4];
1350 int i;
1352 real_to_target (tmp, &d, GET_MODE (op));
1353 for (i = 0; i < 4; i++)
1354 tmp[i] = ~tmp[i];
1355 real_from_target (&d, tmp, mode);
1356 break;
1358 default:
1359 gcc_unreachable ();
1361 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1364 else if (GET_CODE (op) == CONST_DOUBLE
1365 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1366 && GET_MODE_CLASS (mode) == MODE_INT
1367 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1369 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1370 operators are intentionally left unspecified (to ease implementation
1371 by target backends), for consistency, this routine implements the
1372 same semantics for constant folding as used by the middle-end. */
1374 /* This was formerly used only for non-IEEE float.
1375 eggert@twinsun.com says it is safe for IEEE also. */
1376 HOST_WIDE_INT xh, xl, th, tl;
1377 REAL_VALUE_TYPE x, t;
1378 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1379 switch (code)
1381 case FIX:
1382 if (REAL_VALUE_ISNAN (x))
1383 return const0_rtx;
1385 /* Test against the signed upper bound. */
1386 if (width > HOST_BITS_PER_WIDE_INT)
1388 th = ((unsigned HOST_WIDE_INT) 1
1389 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1390 tl = -1;
1392 else
1394 th = 0;
1395 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1397 real_from_integer (&t, VOIDmode, tl, th, 0);
1398 if (REAL_VALUES_LESS (t, x))
1400 xh = th;
1401 xl = tl;
1402 break;
1405 /* Test against the signed lower bound. */
1406 if (width > HOST_BITS_PER_WIDE_INT)
1408 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1409 tl = 0;
1411 else
1413 th = -1;
1414 tl = (HOST_WIDE_INT) -1 << (width - 1);
1416 real_from_integer (&t, VOIDmode, tl, th, 0);
1417 if (REAL_VALUES_LESS (x, t))
1419 xh = th;
1420 xl = tl;
1421 break;
1423 REAL_VALUE_TO_INT (&xl, &xh, x);
1424 break;
1426 case UNSIGNED_FIX:
1427 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1428 return const0_rtx;
1430 /* Test against the unsigned upper bound. */
1431 if (width == 2*HOST_BITS_PER_WIDE_INT)
1433 th = -1;
1434 tl = -1;
1436 else if (width >= HOST_BITS_PER_WIDE_INT)
1438 th = ((unsigned HOST_WIDE_INT) 1
1439 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1440 tl = -1;
1442 else
1444 th = 0;
1445 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1447 real_from_integer (&t, VOIDmode, tl, th, 1);
1448 if (REAL_VALUES_LESS (t, x))
1450 xh = th;
1451 xl = tl;
1452 break;
1455 REAL_VALUE_TO_INT (&xl, &xh, x);
1456 break;
1458 default:
1459 gcc_unreachable ();
1461 return immed_double_const (xl, xh, mode);
1464 return NULL_RTX;
1467 /* Subroutine of simplify_binary_operation to simplify a commutative,
1468 associative binary operation CODE with result mode MODE, operating
1469 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1470 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1471 canonicalization is possible. */
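/* E.g. (plus (plus a b) (plus c d)) is first linearized to
   (plus (plus (plus a b) c) d); the canonicalizations below then try to
   move constants and other "simple" operands outward where they may fold
   with each other.  */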
1473 static rtx
1474 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1475 rtx op0, rtx op1)
1477 rtx tem;
1479 /* Linearize the operator to the left. */
1480 if (GET_CODE (op1) == code)
1482 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1483 if (GET_CODE (op0) == code)
1485 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1486 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1489 /* "a op (b op c)" becomes "(b op c) op a". */
1490 if (! swap_commutative_operands_p (op1, op0))
1491 return simplify_gen_binary (code, mode, op1, op0);
1493 tem = op0;
1494 op0 = op1;
1495 op1 = tem;
1498 if (GET_CODE (op0) == code)
1500 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1501 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1503 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1504 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1507 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1508 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1509 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1510 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1511 if (tem != 0)
1512 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1514 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1515 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1516 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1517 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1518 if (tem != 0)
1519 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1522 return 0;
1526 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1527 and OP1. Return 0 if no simplification is possible.
1529 Don't use this for relational operations such as EQ or LT.
1530 Use simplify_relational_operation instead. */
1532 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1533 rtx op0, rtx op1)
1535 rtx trueop0, trueop1;
1536 rtx tem;
1538 /* Relational operations don't work here. We must know the mode
1539 of the operands in order to do the comparison correctly.
1540 Assuming a full word can give incorrect results.
1541 Consider comparing 128 with -128 in QImode. */
1542 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1543 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1545 /* Make sure the constant is second. */
1546 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1547 && swap_commutative_operands_p (op0, op1))
1549 tem = op0, op0 = op1, op1 = tem;
1552 trueop0 = avoid_constant_pool_reference (op0);
1553 trueop1 = avoid_constant_pool_reference (op1);
1555 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1556 if (tem)
1557 return tem;
1558 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1561 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1562 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1563 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1564 actual constants. */
1566 static rtx
1567 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1568 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1570 rtx tem, reversed, opleft, opright;
1571 HOST_WIDE_INT val;
1572 unsigned int width = GET_MODE_BITSIZE (mode);
1574 /* Even if we can't compute a constant result,
1575 there are some cases worth simplifying. */
1577 switch (code)
1579 case PLUS:
1580 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1581 when x is NaN, infinite, or finite and nonzero. They aren't
1582 when x is -0 and the rounding mode is not towards -infinity,
1583 since (-0) + 0 is then 0. */
1584 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1585 return op0;
1587 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1588 transformations are safe even for IEEE. */
1589 if (GET_CODE (op0) == NEG)
1590 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1591 else if (GET_CODE (op1) == NEG)
1592 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1594 /* (~a) + 1 -> -a */
1595 if (INTEGRAL_MODE_P (mode)
1596 && GET_CODE (op0) == NOT
1597 && trueop1 == const1_rtx)
1598 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1600 /* Handle both-operands-constant cases. We can only add
1601 CONST_INTs to constants since the sum of relocatable symbols
1602 can't be handled by most assemblers. Don't add CONST_INT
1603 to CONST_INT since overflow won't be computed properly if wider
1604 than HOST_BITS_PER_WIDE_INT. */
1606 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1607 && GET_CODE (op1) == CONST_INT)
1608 return plus_constant (op0, INTVAL (op1));
1609 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1610 && GET_CODE (op0) == CONST_INT)
1611 return plus_constant (op1, INTVAL (op0));
1613 /* See if this is something like X * C - X or vice versa or
1614 if the multiplication is written as a shift. If so, we can
1615 distribute and make a new multiply, shift, or maybe just
1616 have X (if C is 2 in the example above). But don't make
1617 something more expensive than we had before. */
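/* E.g. (plus (mult X 3) X) distributes to (mult X 4) and
   (plus (ashift X 2) X) to (mult X 5), provided the rtx_cost check below
   finds the result no more expensive than the original.  */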
1619 if (SCALAR_INT_MODE_P (mode))
1621 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1622 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1623 rtx lhs = op0, rhs = op1;
1625 if (GET_CODE (lhs) == NEG)
1627 coeff0l = -1;
1628 coeff0h = -1;
1629 lhs = XEXP (lhs, 0);
1631 else if (GET_CODE (lhs) == MULT
1632 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1634 coeff0l = INTVAL (XEXP (lhs, 1));
1635 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1636 lhs = XEXP (lhs, 0);
1638 else if (GET_CODE (lhs) == ASHIFT
1639 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1640 && INTVAL (XEXP (lhs, 1)) >= 0
1641 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1643 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1644 coeff0h = 0;
1645 lhs = XEXP (lhs, 0);
1648 if (GET_CODE (rhs) == NEG)
1650 coeff1l = -1;
1651 coeff1h = -1;
1652 rhs = XEXP (rhs, 0);
1654 else if (GET_CODE (rhs) == MULT
1655 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1657 coeff1l = INTVAL (XEXP (rhs, 1));
1658 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1659 rhs = XEXP (rhs, 0);
1661 else if (GET_CODE (rhs) == ASHIFT
1662 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1663 && INTVAL (XEXP (rhs, 1)) >= 0
1664 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1666 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1667 coeff1h = 0;
1668 rhs = XEXP (rhs, 0);
1671 if (rtx_equal_p (lhs, rhs))
1673 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1674 rtx coeff;
1675 unsigned HOST_WIDE_INT l;
1676 HOST_WIDE_INT h;
1678 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1679 coeff = immed_double_const (l, h, mode);
1681 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1682 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1683 ? tem : 0;
1687 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1688 if ((GET_CODE (op1) == CONST_INT
1689 || GET_CODE (op1) == CONST_DOUBLE)
1690 && GET_CODE (op0) == XOR
1691 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1692 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1693 && mode_signbit_p (mode, op1))
1694 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1695 simplify_gen_binary (XOR, mode, op1,
1696 XEXP (op0, 1)));
1698 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1699 if (GET_CODE (op0) == MULT
1700 && GET_CODE (XEXP (op0, 0)) == NEG)
1702 rtx in1, in2;
1704 in1 = XEXP (XEXP (op0, 0), 0);
1705 in2 = XEXP (op0, 1);
1706 return simplify_gen_binary (MINUS, mode, op1,
1707 simplify_gen_binary (MULT, mode,
1708 in1, in2));
1711 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1712 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1713 is 1. */
1714 if (COMPARISON_P (op0)
1715 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1716 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1717 && (reversed = reversed_comparison (op0, mode)))
1718 return
1719 simplify_gen_unary (NEG, mode, reversed, mode);
1721 /* If one of the operands is a PLUS or a MINUS, see if we can
1722 simplify this by the associative law.
1723 Don't use the associative law for floating point.
1724 The inaccuracy makes it nonassociative,
1725 and subtle programs can break if operations are associated. */
1727 if (INTEGRAL_MODE_P (mode)
1728 && (plus_minus_operand_p (op0)
1729 || plus_minus_operand_p (op1))
1730 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1731 return tem;
1733 /* Reassociate floating point addition only when the user
1734 specifies unsafe math optimizations. */
1735 if (FLOAT_MODE_P (mode)
1736 && flag_unsafe_math_optimizations)
1738 tem = simplify_associative_operation (code, mode, op0, op1);
1739 if (tem)
1740 return tem;
1742 break;
1744 case COMPARE:
1745 #ifdef HAVE_cc0
1746 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1747 using cc0, in which case we want to leave it as a COMPARE
1748 so we can distinguish it from a register-register-copy.
1750 In IEEE floating point, x-0 is not the same as x. */
1752 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1753 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1754 && trueop1 == CONST0_RTX (mode))
1755 return op0;
1756 #endif
1758 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1759 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1760 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1761 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1763 rtx xop00 = XEXP (op0, 0);
1764 rtx xop10 = XEXP (op1, 0);
1766 #ifdef HAVE_cc0
1767 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1768 #else
1769 if (REG_P (xop00) && REG_P (xop10)
1770 && GET_MODE (xop00) == GET_MODE (xop10)
1771 && REGNO (xop00) == REGNO (xop10)
1772 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1773 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1774 #endif
1775 return xop00;
1777 break;
1779 case MINUS:
1780 /* We can't assume x-x is 0 even with non-IEEE floating point,
1781 but since it is zero except in very strange circumstances, we
1782 will treat it as zero with -funsafe-math-optimizations. */
1783 if (rtx_equal_p (trueop0, trueop1)
1784 && ! side_effects_p (op0)
1785 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1786 return CONST0_RTX (mode);
1788 /* Change subtraction from zero into negation. (0 - x) is the
1789 same as -x when x is NaN, infinite, or finite and nonzero.
1790 But if the mode has signed zeros, and does not round towards
1791 -infinity, then 0 - 0 is 0, not -0. */
1792 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1793 return simplify_gen_unary (NEG, mode, op1, mode);
1795 /* (-1 - a) is ~a. */
1796 if (trueop0 == constm1_rtx)
1797 return simplify_gen_unary (NOT, mode, op1, mode);
1799 /* Subtracting 0 has no effect unless the mode has signed zeros
1800 and supports rounding towards -infinity. In such a case,
1801 0 - 0 is -0. */
1802 if (!(HONOR_SIGNED_ZEROS (mode)
1803 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1804 && trueop1 == CONST0_RTX (mode))
1805 return op0;
1807 /* See if this is something like X * C - X or vice versa or
1808 if the multiplication is written as a shift. If so, we can
1809 distribute and make a new multiply, shift, or maybe just
1810 have X (if C is 2 in the example above). But don't make
1811 something more expensive than we had before. */
1813 if (SCALAR_INT_MODE_P (mode))
1815 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1816 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1817 rtx lhs = op0, rhs = op1;
1819 if (GET_CODE (lhs) == NEG)
1821 coeff0l = -1;
1822 coeff0h = -1;
1823 lhs = XEXP (lhs, 0);
1825 else if (GET_CODE (lhs) == MULT
1826 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1828 coeff0l = INTVAL (XEXP (lhs, 1));
1829 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1830 lhs = XEXP (lhs, 0);
1832 else if (GET_CODE (lhs) == ASHIFT
1833 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1834 && INTVAL (XEXP (lhs, 1)) >= 0
1835 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1837 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1838 coeff0h = 0;
1839 lhs = XEXP (lhs, 0);
1842 if (GET_CODE (rhs) == NEG)
1844 negcoeff1l = 1;
1845 negcoeff1h = 0;
1846 rhs = XEXP (rhs, 0);
1848 else if (GET_CODE (rhs) == MULT
1849 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1851 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1852 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1853 rhs = XEXP (rhs, 0);
1855 else if (GET_CODE (rhs) == ASHIFT
1856 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1857 && INTVAL (XEXP (rhs, 1)) >= 0
1858 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1860 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1861 negcoeff1h = -1;
1862 rhs = XEXP (rhs, 0);
1865 if (rtx_equal_p (lhs, rhs))
1867 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1868 rtx coeff;
1869 unsigned HOST_WIDE_INT l;
1870 HOST_WIDE_INT h;
1872 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1873 coeff = immed_double_const (l, h, mode);
1875 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1876 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1877 ? tem : 0;
1881 /* (a - (-b)) -> (a + b). True even for IEEE. */
1882 if (GET_CODE (op1) == NEG)
1883 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1885 /* (-x - c) may be simplified as (-c - x). */
1886 if (GET_CODE (op0) == NEG
1887 && (GET_CODE (op1) == CONST_INT
1888 || GET_CODE (op1) == CONST_DOUBLE))
1890 tem = simplify_unary_operation (NEG, mode, op1, mode);
1891 if (tem)
1892 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1895 /* Don't let a relocatable value get a negative coeff. */
1896 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1897 return simplify_gen_binary (PLUS, mode,
1898 op0,
1899 neg_const_int (mode, op1));
1901 /* (x - (x & y)) -> (x & ~y) */
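/* Every bit set in (and X Y) is also set in X, so the subtraction never
   borrows and amounts to clearing in X the bits selected by Y.  */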
1902 if (GET_CODE (op1) == AND)
1904 if (rtx_equal_p (op0, XEXP (op1, 0)))
1906 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1907 GET_MODE (XEXP (op1, 1)));
1908 return simplify_gen_binary (AND, mode, op0, tem);
1910 if (rtx_equal_p (op0, XEXP (op1, 1)))
1912 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1913 GET_MODE (XEXP (op1, 0)));
1914 return simplify_gen_binary (AND, mode, op0, tem);
1918 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1919 by reversing the comparison code if valid. */
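/* E.g. with 0/1 flag values, (minus (const_int 1) (eq A B)) is simply
   (ne A B).  */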
1920 if (STORE_FLAG_VALUE == 1
1921 && trueop0 == const1_rtx
1922 && COMPARISON_P (op1)
1923 && (reversed = reversed_comparison (op1, mode)))
1924 return reversed;
1926 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1927 if (GET_CODE (op1) == MULT
1928 && GET_CODE (XEXP (op1, 0)) == NEG)
1930 rtx in1, in2;
1932 in1 = XEXP (XEXP (op1, 0), 0);
1933 in2 = XEXP (op1, 1);
1934 return simplify_gen_binary (PLUS, mode,
1935 simplify_gen_binary (MULT, mode,
1936 in1, in2),
1937 op0);
1940 /* Canonicalize (minus (neg A) (mult B C)) to
1941 (minus (mult (neg B) C) A). */
1942 if (GET_CODE (op1) == MULT
1943 && GET_CODE (op0) == NEG)
1945 rtx in1, in2;
1947 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1948 in2 = XEXP (op1, 1);
1949 return simplify_gen_binary (MINUS, mode,
1950 simplify_gen_binary (MULT, mode,
1951 in1, in2),
1952 XEXP (op0, 0));
1955 /* If one of the operands is a PLUS or a MINUS, see if we can
1956 simplify this by the associative law. This will, for example,
1957 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1958 Don't use the associative law for floating point.
1959 The inaccuracy makes it nonassociative,
1960 and subtle programs can break if operations are associated. */
1962 if (INTEGRAL_MODE_P (mode)
1963 && (plus_minus_operand_p (op0)
1964 || plus_minus_operand_p (op1))
1965 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1966 return tem;
1967 break;
1969 case MULT:
1970 if (trueop1 == constm1_rtx)
1971 return simplify_gen_unary (NEG, mode, op0, mode);
1973 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1974 x is NaN, since x * 0 is then also NaN. Nor is it valid
1975 when the mode has signed zeros, since multiplying a negative
1976 number by 0 will give -0, not 0. */
1977 if (!HONOR_NANS (mode)
1978 && !HONOR_SIGNED_ZEROS (mode)
1979 && trueop1 == CONST0_RTX (mode)
1980 && ! side_effects_p (op0))
1981 return op1;
1983 /* In IEEE floating point, x*1 is not equivalent to x for
1984 signalling NaNs. */
1985 if (!HONOR_SNANS (mode)
1986 && trueop1 == CONST1_RTX (mode))
1987 return op0;
1989 /* Convert multiply by constant power of two into shift unless
1990 we are still generating RTL. This test is a kludge. */
1991 if (GET_CODE (trueop1) == CONST_INT
1992 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1993 /* If the mode is larger than the host word size, and the
1994 uppermost bit is set, then this isn't a power of two due
1995 to implicit sign extension. */
1996 && (width <= HOST_BITS_PER_WIDE_INT
1997 || val != HOST_BITS_PER_WIDE_INT - 1))
1998 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
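      /* For example, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)).  */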
2000 /* Likewise for multipliers wider than a word. */
2001 if (GET_CODE (trueop1) == CONST_DOUBLE
2002 && (GET_MODE (trueop1) == VOIDmode
2003 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2004 && GET_MODE (op0) == mode
2005 && CONST_DOUBLE_LOW (trueop1) == 0
2006 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2007 return simplify_gen_binary (ASHIFT, mode, op0,
2008 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2010 /* x*2 is x+x and x*(-1) is -x */
2011 if (GET_CODE (trueop1) == CONST_DOUBLE
2012 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2013 && GET_MODE (op0) == mode)
2015 REAL_VALUE_TYPE d;
2016 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2018 if (REAL_VALUES_EQUAL (d, dconst2))
2019 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2021 if (!HONOR_SNANS (mode)
2022 && REAL_VALUES_EQUAL (d, dconstm1))
2023 return simplify_gen_unary (NEG, mode, op0, mode);
2026 /* Optimize -x * -x as x * x. */
2027 if (FLOAT_MODE_P (mode)
2028 && GET_CODE (op0) == NEG
2029 && GET_CODE (op1) == NEG
2030 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2031 && !side_effects_p (XEXP (op0, 0)))
2032 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2035 if (SCALAR_FLOAT_MODE_P (mode)
2036 && GET_CODE (op0) == ABS
2037 && GET_CODE (op1) == ABS
2038 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2039 && !side_effects_p (XEXP (op0, 0)))
2040 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2042 /* Reassociate multiplication, but for floating point MULTs
2043 only when the user specifies unsafe math optimizations. */
2044 if (! FLOAT_MODE_P (mode)
2045 || flag_unsafe_math_optimizations)
2047 tem = simplify_associative_operation (code, mode, op0, op1);
2048 if (tem)
2049 return tem;
2051 break;
2053 case IOR:
2054 if (trueop1 == const0_rtx)
2055 return op0;
2056 if (GET_CODE (trueop1) == CONST_INT
2057 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2058 == GET_MODE_MASK (mode)))
2059 return op1;
2060 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2061 return op0;
2062 /* A | (~A) -> -1 */
2063 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2064 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2065 && ! side_effects_p (op0)
2066 && SCALAR_INT_MODE_P (mode))
2067 return constm1_rtx;
2069 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2070 if (GET_CODE (op1) == CONST_INT
2071 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2072 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2073 return op1;
2075 /* Canonicalize (X & C1) | C2. */
2076 if (GET_CODE (op0) == AND
2077 && GET_CODE (trueop1) == CONST_INT
2078 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2080 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2081 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2082 HOST_WIDE_INT c2 = INTVAL (trueop1);
2084 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2085 if ((c1 & c2) == c1
2086 && !side_effects_p (XEXP (op0, 0)))
2087 return trueop1;
2089 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2090 if (((c1|c2) & mask) == mask)
2091 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2093 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2094 if (((c1 & ~c2) & mask) != (c1 & mask))
2096 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2097 gen_int_mode (c1 & ~c2, mode));
2098 return simplify_gen_binary (IOR, mode, tem, op1);
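          /* For example, with C1 == 0x0f and C2 == 0x06 the bits of C1
             that are already set in C2 are dropped, giving
             (ior (and X (const_int 0x09)) (const_int 0x06)).  */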
2102 /* Convert (A & B) | A to A. */
2103 if (GET_CODE (op0) == AND
2104 && (rtx_equal_p (XEXP (op0, 0), op1)
2105 || rtx_equal_p (XEXP (op0, 1), op1))
2106 && ! side_effects_p (XEXP (op0, 0))
2107 && ! side_effects_p (XEXP (op0, 1)))
2108 return op1;
2110 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2111 mode size to (rotate A CX). */
2113 if (GET_CODE (op1) == ASHIFT
2114 || GET_CODE (op1) == SUBREG)
2116 opleft = op1;
2117 opright = op0;
2119 else
2121 opright = op1;
2122 opleft = op0;
2125 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2126 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2127 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2128 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2129 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2130 == GET_MODE_BITSIZE (mode)))
2131 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
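      /* For example, in SImode (ior (ashift x (const_int 8))
         (lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)),
         since 8 + 24 equals the 32-bit mode size.  */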
2133 /* Same, but for ashift that has been "simplified" to a wider mode
2134 by simplify_shift_const. */
2136 if (GET_CODE (opleft) == SUBREG
2137 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2138 && GET_CODE (opright) == LSHIFTRT
2139 && GET_CODE (XEXP (opright, 0)) == SUBREG
2140 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2141 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2142 && (GET_MODE_SIZE (GET_MODE (opleft))
2143 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2144 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2145 SUBREG_REG (XEXP (opright, 0)))
2146 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2147 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2148 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2149 == GET_MODE_BITSIZE (mode)))
2150 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2151 XEXP (SUBREG_REG (opleft), 1));
2153 /* If we have (ior (and X C1) C2), simplify this by making
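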
2154 C1 as small as possible if C1 actually changes. */
2155 if (GET_CODE (op1) == CONST_INT
2156 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2157 || INTVAL (op1) > 0)
2158 && GET_CODE (op0) == AND
2159 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2160 && GET_CODE (op1) == CONST_INT
2161 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2162 return simplify_gen_binary (IOR, mode,
2163 simplify_gen_binary
2164 (AND, mode, XEXP (op0, 0),
2165 GEN_INT (INTVAL (XEXP (op0, 1))
2166 & ~INTVAL (op1))),
2167 op1);
2169 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2170 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2171 the PLUS does not affect any of the bits in OP1: then we can do
2172 the IOR as a PLUS and we can associate. This is valid if OP1
2173 can be safely shifted left C bits. */
2174 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2175 && GET_CODE (XEXP (op0, 0)) == PLUS
2176 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2177 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2178 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2180 int count = INTVAL (XEXP (op0, 1));
2181 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2183 if (mask >> count == INTVAL (trueop1)
2184 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2185 return simplify_gen_binary (ASHIFTRT, mode,
2186 plus_constant (XEXP (op0, 0), mask),
2187 XEXP (op0, 1));
2190 tem = simplify_associative_operation (code, mode, op0, op1);
2191 if (tem)
2192 return tem;
2193 break;
2195 case XOR:
2196 if (trueop1 == const0_rtx)
2197 return op0;
2198 if (GET_CODE (trueop1) == CONST_INT
2199 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2200 == GET_MODE_MASK (mode)))
2201 return simplify_gen_unary (NOT, mode, op0, mode);
2202 if (rtx_equal_p (trueop0, trueop1)
2203 && ! side_effects_p (op0)
2204 && GET_MODE_CLASS (mode) != MODE_CC)
2205 return CONST0_RTX (mode);
2207 /* Canonicalize XOR of the most significant bit to PLUS. */
2208 if ((GET_CODE (op1) == CONST_INT
2209 || GET_CODE (op1) == CONST_DOUBLE)
2210 && mode_signbit_p (mode, op1))
2211 return simplify_gen_binary (PLUS, mode, op0, op1);
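      /* For example, in SImode adding the sign-bit constant 0x80000000
         flips only the sign bit, exactly as XOR with it does, so the XOR
         form is rewritten as the equivalent PLUS.  */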
2212 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2213 if ((GET_CODE (op1) == CONST_INT
2214 || GET_CODE (op1) == CONST_DOUBLE)
2215 && GET_CODE (op0) == PLUS
2216 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2217 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2218 && mode_signbit_p (mode, XEXP (op0, 1)))
2219 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2220 simplify_gen_binary (XOR, mode, op1,
2221 XEXP (op0, 1)));
2223 /* If we are XORing two things that have no bits in common,
2224 convert them into an IOR. This helps to detect rotation encoded
2225 using those methods and possibly other simplifications. */
2227 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2228 && (nonzero_bits (op0, mode)
2229 & nonzero_bits (op1, mode)) == 0)
2230 return (simplify_gen_binary (IOR, mode, op0, op1));
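      /* For example, (xor (and x (const_int 0x0f)) (and y (const_int 0xf0)))
         has no nonzero bits in common, so it is equivalent to the IOR.  */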
2232 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2233 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2234 (NOT y). */
2236 int num_negated = 0;
2238 if (GET_CODE (op0) == NOT)
2239 num_negated++, op0 = XEXP (op0, 0);
2240 if (GET_CODE (op1) == NOT)
2241 num_negated++, op1 = XEXP (op1, 0);
2243 if (num_negated == 2)
2244 return simplify_gen_binary (XOR, mode, op0, op1);
2245 else if (num_negated == 1)
2246 return simplify_gen_unary (NOT, mode,
2247 simplify_gen_binary (XOR, mode, op0, op1),
2248 mode);
2251 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2252 correspond to a machine insn or result in further simplifications
2253 if B is a constant. */
2255 if (GET_CODE (op0) == AND
2256 && rtx_equal_p (XEXP (op0, 1), op1)
2257 && ! side_effects_p (op1))
2258 return simplify_gen_binary (AND, mode,
2259 simplify_gen_unary (NOT, mode,
2260 XEXP (op0, 0), mode),
2261 op1);
2263 else if (GET_CODE (op0) == AND
2264 && rtx_equal_p (XEXP (op0, 0), op1)
2265 && ! side_effects_p (op1))
2266 return simplify_gen_binary (AND, mode,
2267 simplify_gen_unary (NOT, mode,
2268 XEXP (op0, 1), mode),
2269 op1);
2271 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2272 comparison if STORE_FLAG_VALUE is 1. */
2273 if (STORE_FLAG_VALUE == 1
2274 && trueop1 == const1_rtx
2275 && COMPARISON_P (op0)
2276 && (reversed = reversed_comparison (op0, mode)))
2277 return reversed;
2279 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2280 is (lt foo (const_int 0)), so we can perform the above
2281 simplification if STORE_FLAG_VALUE is 1. */
2283 if (STORE_FLAG_VALUE == 1
2284 && trueop1 == const1_rtx
2285 && GET_CODE (op0) == LSHIFTRT
2286 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2287 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2288 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
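      /* For example, in SImode (xor (lshiftrt foo (const_int 31))
         (const_int 1)) inverts the 0/1 sign-bit value of FOO, which is
         (ge foo (const_int 0)) when STORE_FLAG_VALUE is 1.  */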
2290 /* (xor (comparison foo bar) (const_int sign-bit))
2291 when STORE_FLAG_VALUE is the sign bit. */
2292 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2293 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2294 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2295 && trueop1 == const_true_rtx
2296 && COMPARISON_P (op0)
2297 && (reversed = reversed_comparison (op0, mode)))
2298 return reversed;
2300 break;
2302 tem = simplify_associative_operation (code, mode, op0, op1);
2303 if (tem)
2304 return tem;
2305 break;
2307 case AND:
2308 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2309 return trueop1;
2310 /* If we are turning off bits already known off in OP0, we need
2311 not do an AND. */
2312 if (GET_CODE (trueop1) == CONST_INT
2313 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2314 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2315 return op0;
2316 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2317 && GET_MODE_CLASS (mode) != MODE_CC)
2318 return op0;
2319 /* A & (~A) -> 0 */
2320 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2321 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2322 && ! side_effects_p (op0)
2323 && GET_MODE_CLASS (mode) != MODE_CC)
2324 return CONST0_RTX (mode);
2326 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2327 there are no nonzero bits of C outside of X's mode. */
2328 if ((GET_CODE (op0) == SIGN_EXTEND
2329 || GET_CODE (op0) == ZERO_EXTEND)
2330 && GET_CODE (trueop1) == CONST_INT
2331 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2332 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2333 & INTVAL (trueop1)) == 0)
2335 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2336 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2337 gen_int_mode (INTVAL (trueop1),
2338 imode));
2339 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
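        /* For example, (and:SI (sign_extend:SI (reg:QI x)) (const_int 0x7f))
           becomes (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))),
           since 0x7f has no bits outside QImode.  */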
2342 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2343 if (GET_CODE (op0) == IOR
2344 && GET_CODE (trueop1) == CONST_INT
2345 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2347 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2348 return simplify_gen_binary (IOR, mode,
2349 simplify_gen_binary (AND, mode,
2350 XEXP (op0, 0), op1),
2351 gen_int_mode (tmp, mode));
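          /* For example, (and (ior x (const_int 0x30)) (const_int 0xf0))
             becomes (ior (and x (const_int 0xf0)) (const_int 0x30)).  */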
2354 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2355 insn (and may simplify more). */
2356 if (GET_CODE (op0) == XOR
2357 && rtx_equal_p (XEXP (op0, 0), op1)
2358 && ! side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode,
2360 simplify_gen_unary (NOT, mode,
2361 XEXP (op0, 1), mode),
2362 op1);
2364 if (GET_CODE (op0) == XOR
2365 && rtx_equal_p (XEXP (op0, 1), op1)
2366 && ! side_effects_p (op1))
2367 return simplify_gen_binary (AND, mode,
2368 simplify_gen_unary (NOT, mode,
2369 XEXP (op0, 0), mode),
2370 op1);
2372 /* Similarly for (~(A ^ B)) & A. */
2373 if (GET_CODE (op0) == NOT
2374 && GET_CODE (XEXP (op0, 0)) == XOR
2375 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2376 && ! side_effects_p (op1))
2377 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2379 if (GET_CODE (op0) == NOT
2380 && GET_CODE (XEXP (op0, 0)) == XOR
2381 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2382 && ! side_effects_p (op1))
2383 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2385 /* Convert (A | B) & A to A. */
2386 if (GET_CODE (op0) == IOR
2387 && (rtx_equal_p (XEXP (op0, 0), op1)
2388 || rtx_equal_p (XEXP (op0, 1), op1))
2389 && ! side_effects_p (XEXP (op0, 0))
2390 && ! side_effects_p (XEXP (op0, 1)))
2391 return op1;
2393 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2394 ((A & N) + B) & M -> (A + B) & M
2395 Similarly if (N & M) == 0,
2396 ((A | N) + B) & M -> (A + B) & M
2397 and for - instead of + and/or ^ instead of |. */
2398 if (GET_CODE (trueop1) == CONST_INT
2399 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2400 && ~INTVAL (trueop1)
2401 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2402 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2404 rtx pmop[2];
2405 int which;
2407 pmop[0] = XEXP (op0, 0);
2408 pmop[1] = XEXP (op0, 1);
2410 for (which = 0; which < 2; which++)
2412 tem = pmop[which];
2413 switch (GET_CODE (tem))
2415 case AND:
2416 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2417 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2418 == INTVAL (trueop1))
2419 pmop[which] = XEXP (tem, 0);
2420 break;
2421 case IOR:
2422 case XOR:
2423 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2424 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2425 pmop[which] = XEXP (tem, 0);
2426 break;
2427 default:
2428 break;
2432 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2434 tem = simplify_gen_binary (GET_CODE (op0), mode,
2435 pmop[0], pmop[1]);
2436 return simplify_gen_binary (code, mode, tem, op1);
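          /* For example, with M == 0xff,
             (and (plus (and a (const_int 0x1ff)) b) (const_int 0xff))
             becomes (and (plus a b) (const_int 0xff)), since bits of A
             above the mask cannot affect the masked sum.  */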
2439 tem = simplify_associative_operation (code, mode, op0, op1);
2440 if (tem)
2441 return tem;
2442 break;
2444 case UDIV:
2445 /* 0/x is 0 (or x&0 if x has side-effects). */
2446 if (trueop0 == CONST0_RTX (mode))
2448 if (side_effects_p (op1))
2449 return simplify_gen_binary (AND, mode, op1, trueop0);
2450 return trueop0;
2452 /* x/1 is x. */
2453 if (trueop1 == CONST1_RTX (mode))
2454 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2455 /* Convert divide by power of two into shift. */
2456 if (GET_CODE (trueop1) == CONST_INT
2457 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2458 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
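      /* For example, (udiv x (const_int 16)) becomes
         (lshiftrt x (const_int 4)).  */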
2459 break;
2461 case DIV:
2462 /* Handle floating point and integers separately. */
2463 if (SCALAR_FLOAT_MODE_P (mode))
2465 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2466 safe for modes with NaNs, since 0.0 / 0.0 will then be
2467 NaN rather than 0.0. Nor is it safe for modes with signed
2468 zeros, since dividing 0 by a negative number gives -0.0.  */
2469 if (trueop0 == CONST0_RTX (mode)
2470 && !HONOR_NANS (mode)
2471 && !HONOR_SIGNED_ZEROS (mode)
2472 && ! side_effects_p (op1))
2473 return op0;
2474 /* x/1.0 is x. */
2475 if (trueop1 == CONST1_RTX (mode)
2476 && !HONOR_SNANS (mode))
2477 return op0;
2479 if (GET_CODE (trueop1) == CONST_DOUBLE
2480 && trueop1 != CONST0_RTX (mode))
2482 REAL_VALUE_TYPE d;
2483 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2485 /* x/-1.0 is -x. */
2486 if (REAL_VALUES_EQUAL (d, dconstm1)
2487 && !HONOR_SNANS (mode))
2488 return simplify_gen_unary (NEG, mode, op0, mode);
2490 /* Change FP division by a constant into multiplication.
2491 Only do this with -funsafe-math-optimizations. */
2492 if (flag_unsafe_math_optimizations
2493 && !REAL_VALUES_EQUAL (d, dconst0))
2495 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2496 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2497 return simplify_gen_binary (MULT, mode, op0, tem);
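              /* For example, with -funsafe-math-optimizations
                 (div x 4.0) becomes (mult x 0.25).  */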
2501 else
2503 /* 0/x is 0 (or x&0 if x has side-effects). */
2504 if (trueop0 == CONST0_RTX (mode))
2506 if (side_effects_p (op1))
2507 return simplify_gen_binary (AND, mode, op1, trueop0);
2508 return trueop0;
2510 /* x/1 is x. */
2511 if (trueop1 == CONST1_RTX (mode))
2512 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2513 /* x/-1 is -x. */
2514 if (trueop1 == constm1_rtx)
2516 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2517 return simplify_gen_unary (NEG, mode, x, mode);
2520 break;
2522 case UMOD:
2523 /* 0%x is 0 (or x&0 if x has side-effects). */
2524 if (trueop0 == CONST0_RTX (mode))
2526 if (side_effects_p (op1))
2527 return simplify_gen_binary (AND, mode, op1, trueop0);
2528 return trueop0;
2530 /* x%1 is 0 (or x&0 if x has side-effects). */
2531 if (trueop1 == CONST1_RTX (mode))
2533 if (side_effects_p (op0))
2534 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2535 return CONST0_RTX (mode);
2537 /* Implement modulus by power of two as AND. */
2538 if (GET_CODE (trueop1) == CONST_INT
2539 && exact_log2 (INTVAL (trueop1)) > 0)
2540 return simplify_gen_binary (AND, mode, op0,
2541 GEN_INT (INTVAL (op1) - 1));
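      /* For example, (umod x (const_int 8)) becomes
         (and x (const_int 7)).  */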
2542 break;
2544 case MOD:
2545 /* 0%x is 0 (or x&0 if x has side-effects). */
2546 if (trueop0 == CONST0_RTX (mode))
2548 if (side_effects_p (op1))
2549 return simplify_gen_binary (AND, mode, op1, trueop0);
2550 return trueop0;
2552 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2553 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2555 if (side_effects_p (op0))
2556 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2557 return CONST0_RTX (mode);
2559 break;
2561 case ROTATERT:
2562 case ROTATE:
2563 case ASHIFTRT:
2564 if (trueop1 == CONST0_RTX (mode))
2565 return op0;
2566 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2567 return op0;
2568 /* Rotating ~0 always results in ~0. */
2569 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2570 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2571 && ! side_effects_p (op1))
2572 return op0;
2573 break;
2575 case ASHIFT:
2576 case SS_ASHIFT:
2577 if (trueop1 == CONST0_RTX (mode))
2578 return op0;
2579 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2580 return op0;
2581 break;
2583 case LSHIFTRT:
2584 if (trueop1 == CONST0_RTX (mode))
2585 return op0;
2586 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2587 return op0;
2588 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2589 if (GET_CODE (op0) == CLZ
2590 && GET_CODE (trueop1) == CONST_INT
2591 && STORE_FLAG_VALUE == 1
2592 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2594 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2595 unsigned HOST_WIDE_INT zero_val = 0;
2597 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2598 && zero_val == GET_MODE_BITSIZE (imode)
2599 && INTVAL (trueop1) == exact_log2 (zero_val))
2600 return simplify_gen_relational (EQ, mode, imode,
2601 XEXP (op0, 0), const0_rtx);
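          /* For example, on a 32-bit target where CLZ of zero is defined
             to be 32, (lshiftrt (clz:SI x) (const_int 5)) is nonzero only
             when x is zero, so it becomes (eq:SI x (const_int 0)).  */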
2603 break;
2605 case SMIN:
2606 if (width <= HOST_BITS_PER_WIDE_INT
2607 && GET_CODE (trueop1) == CONST_INT
2608 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2609 && ! side_effects_p (op0))
2610 return op1;
2611 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2612 return op0;
2613 tem = simplify_associative_operation (code, mode, op0, op1);
2614 if (tem)
2615 return tem;
2616 break;
2618 case SMAX:
2619 if (width <= HOST_BITS_PER_WIDE_INT
2620 && GET_CODE (trueop1) == CONST_INT
2621 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2622 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2623 && ! side_effects_p (op0))
2624 return op1;
2625 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2626 return op0;
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 break;
2632 case UMIN:
2633 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2634 return op1;
2635 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2636 return op0;
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2638 if (tem)
2639 return tem;
2640 break;
2642 case UMAX:
2643 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2644 return op1;
2645 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2646 return op0;
2647 tem = simplify_associative_operation (code, mode, op0, op1);
2648 if (tem)
2649 return tem;
2650 break;
2652 case SS_PLUS:
2653 case US_PLUS:
2654 case SS_MINUS:
2655 case US_MINUS:
2656 /* ??? There are simplifications that can be done. */
2657 return 0;
2659 case VEC_SELECT:
2660 if (!VECTOR_MODE_P (mode))
2662 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2663 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2664 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2665 gcc_assert (XVECLEN (trueop1, 0) == 1);
2666 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2668 if (GET_CODE (trueop0) == CONST_VECTOR)
2669 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2670 (trueop1, 0, 0)));
2672 else
2674 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2675 gcc_assert (GET_MODE_INNER (mode)
2676 == GET_MODE_INNER (GET_MODE (trueop0)));
2677 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2679 if (GET_CODE (trueop0) == CONST_VECTOR)
2681 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2682 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2683 rtvec v = rtvec_alloc (n_elts);
2684 unsigned int i;
2686 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2687 for (i = 0; i < n_elts; i++)
2689 rtx x = XVECEXP (trueop1, 0, i);
2691 gcc_assert (GET_CODE (x) == CONST_INT);
2692 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2693 INTVAL (x));
2696 return gen_rtx_CONST_VECTOR (mode, v);
2700 if (XVECLEN (trueop1, 0) == 1
2701 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2702 && GET_CODE (trueop0) == VEC_CONCAT)
2704 rtx vec = trueop0;
2705 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2707 /* Try to find the element in the VEC_CONCAT. */
2708 while (GET_MODE (vec) != mode
2709 && GET_CODE (vec) == VEC_CONCAT)
2711 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2712 if (offset < vec_size)
2713 vec = XEXP (vec, 0);
2714 else
2716 offset -= vec_size;
2717 vec = XEXP (vec, 1);
2719 vec = avoid_constant_pool_reference (vec);
2722 if (GET_MODE (vec) == mode)
2723 return vec;
2726 return 0;
2727 case VEC_CONCAT:
2729 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2730 ? GET_MODE (trueop0)
2731 : GET_MODE_INNER (mode));
2732 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2733 ? GET_MODE (trueop1)
2734 : GET_MODE_INNER (mode));
2736 gcc_assert (VECTOR_MODE_P (mode));
2737 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2738 == GET_MODE_SIZE (mode));
2740 if (VECTOR_MODE_P (op0_mode))
2741 gcc_assert (GET_MODE_INNER (mode)
2742 == GET_MODE_INNER (op0_mode));
2743 else
2744 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2746 if (VECTOR_MODE_P (op1_mode))
2747 gcc_assert (GET_MODE_INNER (mode)
2748 == GET_MODE_INNER (op1_mode));
2749 else
2750 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2752 if ((GET_CODE (trueop0) == CONST_VECTOR
2753 || GET_CODE (trueop0) == CONST_INT
2754 || GET_CODE (trueop0) == CONST_DOUBLE)
2755 && (GET_CODE (trueop1) == CONST_VECTOR
2756 || GET_CODE (trueop1) == CONST_INT
2757 || GET_CODE (trueop1) == CONST_DOUBLE))
2759 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2760 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2761 rtvec v = rtvec_alloc (n_elts);
2762 unsigned int i;
2763 unsigned in_n_elts = 1;
2765 if (VECTOR_MODE_P (op0_mode))
2766 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2767 for (i = 0; i < n_elts; i++)
2769 if (i < in_n_elts)
2771 if (!VECTOR_MODE_P (op0_mode))
2772 RTVEC_ELT (v, i) = trueop0;
2773 else
2774 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2776 else
2778 if (!VECTOR_MODE_P (op1_mode))
2779 RTVEC_ELT (v, i) = trueop1;
2780 else
2781 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2782 i - in_n_elts);
2786 return gen_rtx_CONST_VECTOR (mode, v);
2789 return 0;
2791 default:
2792 gcc_unreachable ();
2795 return 0;
2799 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2800 rtx op0, rtx op1)
2802 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2803 HOST_WIDE_INT val;
2804 unsigned int width = GET_MODE_BITSIZE (mode);
2806 if (VECTOR_MODE_P (mode)
2807 && code != VEC_CONCAT
2808 && GET_CODE (op0) == CONST_VECTOR
2809 && GET_CODE (op1) == CONST_VECTOR)
2811 unsigned n_elts = GET_MODE_NUNITS (mode);
2812 enum machine_mode op0mode = GET_MODE (op0);
2813 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2814 enum machine_mode op1mode = GET_MODE (op1);
2815 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2816 rtvec v = rtvec_alloc (n_elts);
2817 unsigned int i;
2819 gcc_assert (op0_n_elts == n_elts);
2820 gcc_assert (op1_n_elts == n_elts);
2821 for (i = 0; i < n_elts; i++)
2823 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2824 CONST_VECTOR_ELT (op0, i),
2825 CONST_VECTOR_ELT (op1, i));
2826 if (!x)
2827 return 0;
2828 RTVEC_ELT (v, i) = x;
2831 return gen_rtx_CONST_VECTOR (mode, v);
2834 if (VECTOR_MODE_P (mode)
2835 && code == VEC_CONCAT
2836 && CONSTANT_P (op0) && CONSTANT_P (op1))
2838 unsigned n_elts = GET_MODE_NUNITS (mode);
2839 rtvec v = rtvec_alloc (n_elts);
2841 gcc_assert (n_elts >= 2);
2842 if (n_elts == 2)
2844 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2845 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2847 RTVEC_ELT (v, 0) = op0;
2848 RTVEC_ELT (v, 1) = op1;
2850 else
2852 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2853 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2854 unsigned i;
2856 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2857 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2858 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2860 for (i = 0; i < op0_n_elts; ++i)
2861 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2862 for (i = 0; i < op1_n_elts; ++i)
2863 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2866 return gen_rtx_CONST_VECTOR (mode, v);
2869 if (SCALAR_FLOAT_MODE_P (mode)
2870 && GET_CODE (op0) == CONST_DOUBLE
2871 && GET_CODE (op1) == CONST_DOUBLE
2872 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2874 if (code == AND
2875 || code == IOR
2876 || code == XOR)
2878 long tmp0[4];
2879 long tmp1[4];
2880 REAL_VALUE_TYPE r;
2881 int i;
2883 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2884 GET_MODE (op0));
2885 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2886 GET_MODE (op1));
2887 for (i = 0; i < 4; i++)
2889 switch (code)
2891 case AND:
2892 tmp0[i] &= tmp1[i];
2893 break;
2894 case IOR:
2895 tmp0[i] |= tmp1[i];
2896 break;
2897 case XOR:
2898 tmp0[i] ^= tmp1[i];
2899 break;
2900 default:
2901 gcc_unreachable ();
2904 real_from_target (&r, tmp0, mode);
2905 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2907 else
2909 REAL_VALUE_TYPE f0, f1, value, result;
2910 bool inexact;
2912 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2913 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2914 real_convert (&f0, mode, &f0);
2915 real_convert (&f1, mode, &f1);
2917 if (HONOR_SNANS (mode)
2918 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2919 return 0;
2921 if (code == DIV
2922 && REAL_VALUES_EQUAL (f1, dconst0)
2923 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2924 return 0;
2926 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2927 && flag_trapping_math
2928 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2930 int s0 = REAL_VALUE_NEGATIVE (f0);
2931 int s1 = REAL_VALUE_NEGATIVE (f1);
2933 switch (code)
2935 case PLUS:
2936 /* Inf + -Inf = NaN plus exception. */
2937 if (s0 != s1)
2938 return 0;
2939 break;
2940 case MINUS:
2941 /* Inf - Inf = NaN plus exception. */
2942 if (s0 == s1)
2943 return 0;
2944 break;
2945 case DIV:
2946 /* Inf / Inf = NaN plus exception. */
2947 return 0;
2948 default:
2949 break;
2953 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2954 && flag_trapping_math
2955 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2956 || (REAL_VALUE_ISINF (f1)
2957 && REAL_VALUES_EQUAL (f0, dconst0))))
2958 /* Inf * 0 = NaN plus exception. */
2959 return 0;
2961 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2962 &f0, &f1);
2963 real_convert (&result, mode, &value);
2965 /* Don't constant fold this floating point operation if
2966 the result has overflowed and flag_trapping_math is set. */
2968 if (flag_trapping_math
2969 && MODE_HAS_INFINITIES (mode)
2970 && REAL_VALUE_ISINF (result)
2971 && !REAL_VALUE_ISINF (f0)
2972 && !REAL_VALUE_ISINF (f1))
2973 /* Overflow plus exception. */
2974 return 0;
2976 /* Don't constant fold this floating point operation if the
2977 result may depend upon the run-time rounding mode and
2978 flag_rounding_math is set, or if GCC's software emulation
2979 is unable to accurately represent the result. */
2981 if ((flag_rounding_math
2982 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2983 && !flag_unsafe_math_optimizations))
2984 && (inexact || !real_identical (&result, &value)))
2985 return NULL_RTX;
2987 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2991 /* We can fold some multi-word operations. */
2992 if (GET_MODE_CLASS (mode) == MODE_INT
2993 && width == HOST_BITS_PER_WIDE_INT * 2
2994 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2995 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2997 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2998 HOST_WIDE_INT h1, h2, hv, ht;
3000 if (GET_CODE (op0) == CONST_DOUBLE)
3001 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3002 else
3003 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3005 if (GET_CODE (op1) == CONST_DOUBLE)
3006 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3007 else
3008 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3010 switch (code)
3012 case MINUS:
3013 /* A - B == A + (-B). */
3014 neg_double (l2, h2, &lv, &hv);
3015 l2 = lv, h2 = hv;
3017 /* Fall through.... */
3019 case PLUS:
3020 add_double (l1, h1, l2, h2, &lv, &hv);
3021 break;
3023 case MULT:
3024 mul_double (l1, h1, l2, h2, &lv, &hv);
3025 break;
3027 case DIV:
3028 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3029 &lv, &hv, &lt, &ht))
3030 return 0;
3031 break;
3033 case MOD:
3034 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3035 &lt, &ht, &lv, &hv))
3036 return 0;
3037 break;
3039 case UDIV:
3040 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3041 &lv, &hv, &lt, &ht))
3042 return 0;
3043 break;
3045 case UMOD:
3046 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3047 &lt, &ht, &lv, &hv))
3048 return 0;
3049 break;
3051 case AND:
3052 lv = l1 & l2, hv = h1 & h2;
3053 break;
3055 case IOR:
3056 lv = l1 | l2, hv = h1 | h2;
3057 break;
3059 case XOR:
3060 lv = l1 ^ l2, hv = h1 ^ h2;
3061 break;
3063 case SMIN:
3064 if (h1 < h2
3065 || (h1 == h2
3066 && ((unsigned HOST_WIDE_INT) l1
3067 < (unsigned HOST_WIDE_INT) l2)))
3068 lv = l1, hv = h1;
3069 else
3070 lv = l2, hv = h2;
3071 break;
3073 case SMAX:
3074 if (h1 > h2
3075 || (h1 == h2
3076 && ((unsigned HOST_WIDE_INT) l1
3077 > (unsigned HOST_WIDE_INT) l2)))
3078 lv = l1, hv = h1;
3079 else
3080 lv = l2, hv = h2;
3081 break;
3083 case UMIN:
3084 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3085 || (h1 == h2
3086 && ((unsigned HOST_WIDE_INT) l1
3087 < (unsigned HOST_WIDE_INT) l2)))
3088 lv = l1, hv = h1;
3089 else
3090 lv = l2, hv = h2;
3091 break;
3093 case UMAX:
3094 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3095 || (h1 == h2
3096 && ((unsigned HOST_WIDE_INT) l1
3097 > (unsigned HOST_WIDE_INT) l2)))
3098 lv = l1, hv = h1;
3099 else
3100 lv = l2, hv = h2;
3101 break;
3103 case LSHIFTRT: case ASHIFTRT:
3104 case ASHIFT:
3105 case ROTATE: case ROTATERT:
3106 if (SHIFT_COUNT_TRUNCATED)
3107 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3109 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3110 return 0;
3112 if (code == LSHIFTRT || code == ASHIFTRT)
3113 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3114 code == ASHIFTRT);
3115 else if (code == ASHIFT)
3116 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3117 else if (code == ROTATE)
3118 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3119 else /* code == ROTATERT */
3120 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3121 break;
3123 default:
3124 return 0;
3127 return immed_double_const (lv, hv, mode);
3130 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3131 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3133 /* Get the integer argument values in two forms:
3134 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3136 arg0 = INTVAL (op0);
3137 arg1 = INTVAL (op1);
3139 if (width < HOST_BITS_PER_WIDE_INT)
3141 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3142 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3144 arg0s = arg0;
3145 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3146 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3148 arg1s = arg1;
3149 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3150 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3152 else
3154 arg0s = arg0;
3155 arg1s = arg1;
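      /* For example, in the narrow case above with width == 8, the
         zero-extended value 0xff in ARG0 corresponds to the sign-extended
         value -1 in ARG0S, so the signed operations below see -1 rather
         than 255.  */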
3158 /* Compute the value of the arithmetic. */
3160 switch (code)
3162 case PLUS:
3163 val = arg0s + arg1s;
3164 break;
3166 case MINUS:
3167 val = arg0s - arg1s;
3168 break;
3170 case MULT:
3171 val = arg0s * arg1s;
3172 break;
3174 case DIV:
3175 if (arg1s == 0
3176 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3177 && arg1s == -1))
3178 return 0;
3179 val = arg0s / arg1s;
3180 break;
3182 case MOD:
3183 if (arg1s == 0
3184 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3185 && arg1s == -1))
3186 return 0;
3187 val = arg0s % arg1s;
3188 break;
3190 case UDIV:
3191 if (arg1 == 0
3192 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3193 && arg1s == -1))
3194 return 0;
3195 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3196 break;
3198 case UMOD:
3199 if (arg1 == 0
3200 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3201 && arg1s == -1))
3202 return 0;
3203 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3204 break;
3206 case AND:
3207 val = arg0 & arg1;
3208 break;
3210 case IOR:
3211 val = arg0 | arg1;
3212 break;
3214 case XOR:
3215 val = arg0 ^ arg1;
3216 break;
3218 case LSHIFTRT:
3219 case ASHIFT:
3220 case ASHIFTRT:
3221 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3222 the value is in range. We can't return any old value for
3223 out-of-range arguments because either the middle-end (via
3224 shift_truncation_mask) or the back-end might be relying on
3225 target-specific knowledge. Nor can we rely on
3226 shift_truncation_mask, since the shift might not be part of an
3227 ashlM3, lshrM3 or ashrM3 instruction. */
3228 if (SHIFT_COUNT_TRUNCATED)
3229 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3230 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3231 return 0;
3233 val = (code == ASHIFT
3234 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3235 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3237 /* Sign-extend the result for arithmetic right shifts. */
3238 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3239 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3240 break;
3242 case ROTATERT:
3243 if (arg1 < 0)
3244 return 0;
3246 arg1 %= width;
3247 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3248 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3249 break;
3251 case ROTATE:
3252 if (arg1 < 0)
3253 return 0;
3255 arg1 %= width;
3256 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3257 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3258 break;
3260 case COMPARE:
3261 /* Do nothing here. */
3262 return 0;
3264 case SMIN:
3265 val = arg0s <= arg1s ? arg0s : arg1s;
3266 break;
3268 case UMIN:
3269 val = ((unsigned HOST_WIDE_INT) arg0
3270 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3271 break;
3273 case SMAX:
3274 val = arg0s > arg1s ? arg0s : arg1s;
3275 break;
3277 case UMAX:
3278 val = ((unsigned HOST_WIDE_INT) arg0
3279 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3280 break;
3282 case SS_PLUS:
3283 case US_PLUS:
3284 case SS_MINUS:
3285 case US_MINUS:
3286 case SS_ASHIFT:
3287 /* ??? There are simplifications that can be done. */
3288 return 0;
3290 default:
3291 gcc_unreachable ();
3294 return gen_int_mode (val, mode);
3297 return NULL_RTX;
3302 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3303 PLUS or MINUS.
3305 Rather than test for specific cases, we do this by a brute-force method
3306 and do all possible simplifications until no more changes occur. Then
3307 we rebuild the operation. */
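/* For example, (minus (plus A B) (minus C D)) is flattened into the
   operand array as { +A, +B, -C, +D }; entries that simplify against
   each other are combined, and the survivors are rebuilt into a chain
   of PLUS/MINUS expressions.  */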
3309 struct simplify_plus_minus_op_data
3311 rtx op;
3312 short neg;
3315 static int
3316 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3318 const struct simplify_plus_minus_op_data *d1 = p1;
3319 const struct simplify_plus_minus_op_data *d2 = p2;
3320 int result;
3322 result = (commutative_operand_precedence (d2->op)
3323 - commutative_operand_precedence (d1->op));
3324 if (result)
3325 return result;
3327 /* Group together equal REGs to do more simplification. */
3328 if (REG_P (d1->op) && REG_P (d2->op))
3329 return REGNO (d1->op) - REGNO (d2->op);
3330 else
3331 return 0;
3334 static rtx
3335 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3336 rtx op1)
3338 struct simplify_plus_minus_op_data ops[8];
3339 rtx result, tem;
3340 int n_ops = 2, input_ops = 2;
3341 int changed, n_constants = 0, canonicalized = 0;
3342 int i, j;
3344 memset (ops, 0, sizeof ops);
3346 /* Set up the two operands and then expand them until nothing has been
3347 changed. If we run out of room in our array, give up; this should
3348 almost never happen. */
3350 ops[0].op = op0;
3351 ops[0].neg = 0;
3352 ops[1].op = op1;
3353 ops[1].neg = (code == MINUS);
3357 changed = 0;
3359 for (i = 0; i < n_ops; i++)
3361 rtx this_op = ops[i].op;
3362 int this_neg = ops[i].neg;
3363 enum rtx_code this_code = GET_CODE (this_op);
3365 switch (this_code)
3367 case PLUS:
3368 case MINUS:
3369 if (n_ops == 7)
3370 return NULL_RTX;
3372 ops[n_ops].op = XEXP (this_op, 1);
3373 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3374 n_ops++;
3376 ops[i].op = XEXP (this_op, 0);
3377 input_ops++;
3378 changed = 1;
3379 canonicalized |= this_neg;
3380 break;
3382 case NEG:
3383 ops[i].op = XEXP (this_op, 0);
3384 ops[i].neg = ! this_neg;
3385 changed = 1;
3386 canonicalized = 1;
3387 break;
3389 case CONST:
3390 if (n_ops < 7
3391 && GET_CODE (XEXP (this_op, 0)) == PLUS
3392 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3393 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3395 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3396 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3397 ops[n_ops].neg = this_neg;
3398 n_ops++;
3399 changed = 1;
3400 canonicalized = 1;
3402 break;
3404 case NOT:
3405 /* ~a -> (-a - 1) */
3406 if (n_ops != 7)
3408 ops[n_ops].op = constm1_rtx;
3409 ops[n_ops++].neg = this_neg;
3410 ops[i].op = XEXP (this_op, 0);
3411 ops[i].neg = !this_neg;
3412 changed = 1;
3413 canonicalized = 1;
3415 break;
3417 case CONST_INT:
3418 n_constants++;
3419 if (this_neg)
3421 ops[i].op = neg_const_int (mode, this_op);
3422 ops[i].neg = 0;
3423 changed = 1;
3424 canonicalized = 1;
3426 break;
3428 default:
3429 break;
3433 while (changed);
3435 if (n_constants > 1)
3436 canonicalized = 1;
3438 gcc_assert (n_ops >= 2);
3440 /* If we only have two operands, we can avoid the loops. */
3441 if (n_ops == 2)
3443 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3444 rtx lhs, rhs;
3446 /* Get the two operands. Be careful with the order, especially for
3447 the cases where code == MINUS. */
3448 if (ops[0].neg && ops[1].neg)
3450 lhs = gen_rtx_NEG (mode, ops[0].op);
3451 rhs = ops[1].op;
3453 else if (ops[0].neg)
3455 lhs = ops[1].op;
3456 rhs = ops[0].op;
3458 else
3460 lhs = ops[0].op;
3461 rhs = ops[1].op;
3464 return simplify_const_binary_operation (code, mode, lhs, rhs);
3467 /* Now simplify each pair of operands until nothing changes. */
3470 /* Insertion sort is good enough for an eight-element array. */
3471 for (i = 1; i < n_ops; i++)
3473 struct simplify_plus_minus_op_data save;
3474 j = i - 1;
3475 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3476 continue;
3478 canonicalized = 1;
3479 save = ops[i];
3481 ops[j + 1] = ops[j];
3482 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3483 ops[j + 1] = save;
3486 /* This is only useful the first time through. */
3487 if (!canonicalized)
3488 return NULL_RTX;
3490 changed = 0;
3491 for (i = n_ops - 1; i > 0; i--)
3492 for (j = i - 1; j >= 0; j--)
3494 rtx lhs = ops[j].op, rhs = ops[i].op;
3495 int lneg = ops[j].neg, rneg = ops[i].neg;
3497 if (lhs != 0 && rhs != 0)
3499 enum rtx_code ncode = PLUS;
3501 if (lneg != rneg)
3503 ncode = MINUS;
3504 if (lneg)
3505 tem = lhs, lhs = rhs, rhs = tem;
3507 else if (swap_commutative_operands_p (lhs, rhs))
3508 tem = lhs, lhs = rhs, rhs = tem;
3510 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3511 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3513 rtx tem_lhs, tem_rhs;
3515 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3516 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3517 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3519 if (tem && !CONSTANT_P (tem))
3520 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3522 else
3523 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3525 /* Reject "simplifications" that just wrap the two
3526 arguments in a CONST. Failure to do so can result
3527 in infinite recursion with simplify_binary_operation
3528 when it calls us to simplify CONST operations. */
3529 if (tem
3530 && ! (GET_CODE (tem) == CONST
3531 && GET_CODE (XEXP (tem, 0)) == ncode
3532 && XEXP (XEXP (tem, 0), 0) == lhs
3533 && XEXP (XEXP (tem, 0), 1) == rhs))
3535 lneg &= rneg;
3536 if (GET_CODE (tem) == NEG)
3537 tem = XEXP (tem, 0), lneg = !lneg;
3538 if (GET_CODE (tem) == CONST_INT && lneg)
3539 tem = neg_const_int (mode, tem), lneg = 0;
3541 ops[i].op = tem;
3542 ops[i].neg = lneg;
3543 ops[j].op = NULL_RTX;
3544 changed = 1;
3549 /* Pack all the operands to the lower-numbered entries. */
3550 for (i = 0, j = 0; j < n_ops; j++)
3551 if (ops[j].op)
3553 ops[i] = ops[j];
3554 i++;
3556 n_ops = i;
3558 while (changed);
3560 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3561 if (n_ops == 2
3562 && GET_CODE (ops[1].op) == CONST_INT
3563 && CONSTANT_P (ops[0].op)
3564 && ops[0].neg)
3565 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3567 /* We suppressed creation of trivial CONST expressions in the
3568 combination loop to avoid recursion. Create one manually now.
3569 The combination loop should have ensured that there is exactly
3570 one CONST_INT, and the sort will have ensured that it is last
3571 in the array and that any other constant will be next-to-last. */
3573 if (n_ops > 1
3574 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3575 && CONSTANT_P (ops[n_ops - 2].op))
3577 rtx value = ops[n_ops - 1].op;
3578 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3579 value = neg_const_int (mode, value);
3580 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3581 n_ops--;
3584 /* Put a non-negated operand first, if possible. */
3586 for (i = 0; i < n_ops && ops[i].neg; i++)
3587 continue;
3588 if (i == n_ops)
3589 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3590 else if (i != 0)
3592 tem = ops[0].op;
3593 ops[0] = ops[i];
3594 ops[i].op = tem;
3595 ops[i].neg = 1;
3598 /* Now make the result by performing the requested operations. */
3599 result = ops[0].op;
3600 for (i = 1; i < n_ops; i++)
3601 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3602 mode, result, ops[i].op);
3604 return result;
3607 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3608 static bool
3609 plus_minus_operand_p (rtx x)
3611 return GET_CODE (x) == PLUS
3612 || GET_CODE (x) == MINUS
3613 || (GET_CODE (x) == CONST
3614 && GET_CODE (XEXP (x, 0)) == PLUS
3615 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3616 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3619 /* Like simplify_binary_operation except used for relational operators.
3620 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3621 not both be VOIDmode as well.
3623 CMP_MODE specifies the mode in which the comparison is done, so it is
3624 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3625 the operands or, if both are VOIDmode, the operands are compared in
3626 "infinite precision". */
3628 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3629 enum machine_mode cmp_mode, rtx op0, rtx op1)
3631 rtx tem, trueop0, trueop1;
3633 if (cmp_mode == VOIDmode)
3634 cmp_mode = GET_MODE (op0);
3635 if (cmp_mode == VOIDmode)
3636 cmp_mode = GET_MODE (op1);
3638 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3639 if (tem)
3641 if (SCALAR_FLOAT_MODE_P (mode))
3643 if (tem == const0_rtx)
3644 return CONST0_RTX (mode);
3645 #ifdef FLOAT_STORE_FLAG_VALUE
3647 REAL_VALUE_TYPE val;
3648 val = FLOAT_STORE_FLAG_VALUE (mode);
3649 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3651 #else
3652 return NULL_RTX;
3653 #endif
3655 if (VECTOR_MODE_P (mode))
3657 if (tem == const0_rtx)
3658 return CONST0_RTX (mode);
3659 #ifdef VECTOR_STORE_FLAG_VALUE
3661 int i, units;
3662 rtvec v;
3664 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3665 if (val == NULL_RTX)
3666 return NULL_RTX;
3667 if (val == const1_rtx)
3668 return CONST1_RTX (mode);
3670 units = GET_MODE_NUNITS (mode);
3671 v = rtvec_alloc (units);
3672 for (i = 0; i < units; i++)
3673 RTVEC_ELT (v, i) = val;
3674 return gen_rtx_raw_CONST_VECTOR (mode, v);
3676 #else
3677 return NULL_RTX;
3678 #endif
3681 return tem;
3684 /* For the following tests, ensure const0_rtx is op1. */
3685 if (swap_commutative_operands_p (op0, op1)
3686 || (op0 == const0_rtx && op1 != const0_rtx))
3687 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3689 /* If op0 is a compare, extract the comparison arguments from it. */
3690 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3691 return simplify_relational_operation (code, mode, VOIDmode,
3692 XEXP (op0, 0), XEXP (op0, 1));
3694 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3695 || CC0_P (op0))
3696 return NULL_RTX;
3698 trueop0 = avoid_constant_pool_reference (op0);
3699 trueop1 = avoid_constant_pool_reference (op1);
3700 return simplify_relational_operation_1 (code, mode, cmp_mode,
3701 trueop0, trueop1);
3704 /* This part of simplify_relational_operation is only used when CMP_MODE
3705 is not in class MODE_CC (i.e. it is a real comparison).
3707 MODE is the mode of the result, while CMP_MODE specifies the mode in
3708 which the comparison is done, so it is the mode of the operands. */
3710 static rtx
3711 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3712 enum machine_mode cmp_mode, rtx op0, rtx op1)
3714 enum rtx_code op0code = GET_CODE (op0);
3716 if (op1 == const0_rtx && COMPARISON_P (op0))
3718 /* If op0 is a comparison, extract the comparison arguments
3719 from it. */
3720 if (code == NE)
3722 if (GET_MODE (op0) == mode)
3723 return simplify_rtx (op0);
3724 else
3725 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3726 XEXP (op0, 0), XEXP (op0, 1));
3728 else if (code == EQ)
3730 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3731 if (new_code != UNKNOWN)
3732 return simplify_gen_relational (new_code, mode, VOIDmode,
3733 XEXP (op0, 0), XEXP (op0, 1));
3737 if (op1 == const0_rtx)
3739 /* Canonicalize (GTU x 0) as (NE x 0). */
3740 if (code == GTU)
3741 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3742 /* Canonicalize (LEU x 0) as (EQ x 0). */
3743 if (code == LEU)
3744 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3746 else if (op1 == const1_rtx)
3748 switch (code)
3750 case GE:
3751 /* Canonicalize (GE x 1) as (GT x 0). */
3752 return simplify_gen_relational (GT, mode, cmp_mode,
3753 op0, const0_rtx);
3754 case GEU:
3755 /* Canonicalize (GEU x 1) as (NE x 0). */
3756 return simplify_gen_relational (NE, mode, cmp_mode,
3757 op0, const0_rtx);
3758 case LT:
3759 /* Canonicalize (LT x 1) as (LE x 0). */
3760 return simplify_gen_relational (LE, mode, cmp_mode,
3761 op0, const0_rtx);
3762 case LTU:
3763 /* Canonicalize (LTU x 1) as (EQ x 0). */
3764 return simplify_gen_relational (EQ, mode, cmp_mode,
3765 op0, const0_rtx);
3766 default:
3767 break;
3770 else if (op1 == constm1_rtx)
3772 /* Canonicalize (LE x -1) as (LT x 0). */
3773 if (code == LE)
3774 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3775 /* Canonicalize (GT x -1) as (GE x 0). */
3776 if (code == GT)
3777 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3780 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3781 if ((code == EQ || code == NE)
3782 && (op0code == PLUS || op0code == MINUS)
3783 && CONSTANT_P (op1)
3784 && CONSTANT_P (XEXP (op0, 1))
3785 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3787 rtx x = XEXP (op0, 0);
3788 rtx c = XEXP (op0, 1);
3790 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3791 cmp_mode, op1, c);
3792 return simplify_gen_relational (code, mode, cmp_mode, x, c);
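      /* For example, (eq (plus x (const_int 3)) (const_int 10))
         becomes (eq x (const_int 7)).  */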
3795 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3796 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3797 if (code == NE
3798 && op1 == const0_rtx
3799 && GET_MODE_CLASS (mode) == MODE_INT
3800 && cmp_mode != VOIDmode
3801 /* ??? Work-around BImode bugs in the ia64 backend. */
3802 && mode != BImode
3803 && cmp_mode != BImode
3804 && nonzero_bits (op0, cmp_mode) == 1
3805 && STORE_FLAG_VALUE == 1)
3806 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3807 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3808 : lowpart_subreg (mode, op0, cmp_mode);
3810 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3811 if ((code == EQ || code == NE)
3812 && op1 == const0_rtx
3813 && op0code == XOR)
3814 return simplify_gen_relational (code, mode, cmp_mode,
3815 XEXP (op0, 0), XEXP (op0, 1));
3817 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3818 if ((code == EQ || code == NE)
3819 && op0code == XOR
3820 && rtx_equal_p (XEXP (op0, 0), op1)
3821 && !side_effects_p (XEXP (op0, 0)))
3822 return simplify_gen_relational (code, mode, cmp_mode,
3823 XEXP (op0, 1), const0_rtx);
3825 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3826 if ((code == EQ || code == NE)
3827 && op0code == XOR
3828 && rtx_equal_p (XEXP (op0, 1), op1)
3829 && !side_effects_p (XEXP (op0, 1)))
3830 return simplify_gen_relational (code, mode, cmp_mode,
3831 XEXP (op0, 0), const0_rtx);
3833 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3834 if ((code == EQ || code == NE)
3835 && op0code == XOR
3836 && (GET_CODE (op1) == CONST_INT
3837 || GET_CODE (op1) == CONST_DOUBLE)
3838 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3839 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3840 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3841 simplify_gen_binary (XOR, cmp_mode,
3842 XEXP (op0, 1), op1));
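    /* For example, (eq (xor x (const_int 5)) (const_int 12))
       becomes (eq x (const_int 9)), since 5 ^ 12 == 9.  */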
3844 if (op0code == POPCOUNT && op1 == const0_rtx)
3845 switch (code)
3847 case EQ:
3848 case LE:
3849 case LEU:
3850 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3851 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3852 XEXP (op0, 0), const0_rtx);
3854 case NE:
3855 case GT:
3856 case GTU:
3857 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3858 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3859 XEXP (op0, 0), const0_rtx);
3861 default:
3862 break;
3865 return NULL_RTX;
3868 /* Check if the given comparison (done in the given MODE) is actually a
3869 tautology or a contradiction.
3870 If no simplification is possible, this function returns zero.
3871 Otherwise, it returns either const_true_rtx or const0_rtx. */
3874 simplify_const_relational_operation (enum rtx_code code,
3875 enum machine_mode mode,
3876 rtx op0, rtx op1)
3878 int equal, op0lt, op0ltu, op1lt, op1ltu;
3879 rtx tem;
3880 rtx trueop0;
3881 rtx trueop1;
3883 gcc_assert (mode != VOIDmode
3884 || (GET_MODE (op0) == VOIDmode
3885 && GET_MODE (op1) == VOIDmode));
3887 /* If op0 is a compare, extract the comparison arguments from it. */
3888 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3890 op1 = XEXP (op0, 1);
3891 op0 = XEXP (op0, 0);
3893 if (GET_MODE (op0) != VOIDmode)
3894 mode = GET_MODE (op0);
3895 else if (GET_MODE (op1) != VOIDmode)
3896 mode = GET_MODE (op1);
3897 else
3898 return 0;
3901 /* We can't simplify MODE_CC values since we don't know what the
3902 actual comparison is. */
3903 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3904 return 0;
3906 /* Make sure the constant is second. */
3907 if (swap_commutative_operands_p (op0, op1))
3909 tem = op0, op0 = op1, op1 = tem;
3910 code = swap_condition (code);
3913 trueop0 = avoid_constant_pool_reference (op0);
3914 trueop1 = avoid_constant_pool_reference (op1);
3916 /* For integer comparisons of A and B maybe we can simplify A - B and can
3917 then simplify a comparison of that with zero. If A and B are both either
3918 a register or a CONST_INT, this can't help; testing for these cases will
3919 prevent infinite recursion here and speed things up.
3921 We can only do this for EQ and NE comparisons as otherwise we may
3922 lose or introduce overflow which we cannot disregard as undefined as
3923 we do not know the signedness of the operation on either the left or
3924 the right hand side of the comparison. */
3926 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3927 && (code == EQ || code == NE)
3928 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3929 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3930 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3931 /* We cannot do this if tem is a nonzero address. */
3932 && ! nonzero_address_p (tem))
3933 return simplify_const_relational_operation (signed_condition (code),
3934 mode, tem, const0_rtx);
3936 if (! HONOR_NANS (mode) && code == ORDERED)
3937 return const_true_rtx;
3939 if (! HONOR_NANS (mode) && code == UNORDERED)
3940 return const0_rtx;
3942 /* For modes without NaNs, if the two operands are equal, we know the
3943 result except if they have side-effects. */
3944 if (! HONOR_NANS (GET_MODE (trueop0))
3945 && rtx_equal_p (trueop0, trueop1)
3946 && ! side_effects_p (trueop0))
3947 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3949 /* If the operands are floating-point constants, see if we can fold
3950 the result. */
3951 else if (GET_CODE (trueop0) == CONST_DOUBLE
3952 && GET_CODE (trueop1) == CONST_DOUBLE
3953 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3955 REAL_VALUE_TYPE d0, d1;
3957 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3958 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3960 /* Comparisons are unordered iff at least one of the values is NaN. */
3961 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3962 switch (code)
3964 case UNEQ:
3965 case UNLT:
3966 case UNGT:
3967 case UNLE:
3968 case UNGE:
3969 case NE:
3970 case UNORDERED:
3971 return const_true_rtx;
3972 case EQ:
3973 case LT:
3974 case GT:
3975 case LE:
3976 case GE:
3977 case LTGT:
3978 case ORDERED:
3979 return const0_rtx;
3980 default:
3981 return 0;
3984 equal = REAL_VALUES_EQUAL (d0, d1);
3985 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3986 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3989 /* Otherwise, see if the operands are both integers. */
3990 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3991 && (GET_CODE (trueop0) == CONST_DOUBLE
3992 || GET_CODE (trueop0) == CONST_INT)
3993 && (GET_CODE (trueop1) == CONST_DOUBLE
3994 || GET_CODE (trueop1) == CONST_INT))
3996 int width = GET_MODE_BITSIZE (mode);
3997 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3998 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4000 /* Get the two words comprising each integer constant. */
4001 if (GET_CODE (trueop0) == CONST_DOUBLE)
4003 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4004 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4006 else
4008 l0u = l0s = INTVAL (trueop0);
4009 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4012 if (GET_CODE (trueop1) == CONST_DOUBLE)
4014 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4015 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4017 else
4019 l1u = l1s = INTVAL (trueop1);
4020 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4023 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4024 we have to sign or zero-extend the values. */
4025 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4027 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4028 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4030 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4031 l0s |= ((HOST_WIDE_INT) (-1) << width);
4033 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4034 l1s |= ((HOST_WIDE_INT) (-1) << width);
4036 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4037 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
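/* Compare the two double-word values: equality needs both halves to
   match, while the orderings compare the high words first (signed for
   OP0LT/OP1LT, unsigned for OP0LTU/OP1LTU) and fall back to an
   unsigned comparison of the low words when the high words tie. */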
4039 equal = (h0u == h1u && l0u == l1u);
4040 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4041 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4042 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4043 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4046 /* Otherwise, there are some code-specific tests we can make. */
4047 else
4049 /* Optimize comparisons with upper and lower bounds. */
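/* For example, in QImode an unsigned comparison (geu x (const_int 0))
   or (leu x (const_int 255)) is always true, while (gtu x (const_int 255))
   and (ltu x (const_int 0)) are always false. */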
4050 if (SCALAR_INT_MODE_P (mode)
4051 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4053 rtx mmin, mmax;
4054 int sign;
4056 if (code == GEU
4057 || code == LEU
4058 || code == GTU
4059 || code == LTU)
4060 sign = 0;
4061 else
4062 sign = 1;
4064 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4066 tem = NULL_RTX;
4067 switch (code)
4069 case GEU:
4070 case GE:
4071 /* x >= min is always true. */
4072 if (rtx_equal_p (trueop1, mmin))
4073 tem = const_true_rtx;
4074 else
4075 break;
4077 case LEU:
4078 case LE:
4079 /* x <= max is always true. */
4080 if (rtx_equal_p (trueop1, mmax))
4081 tem = const_true_rtx;
4082 break;
4084 case GTU:
4085 case GT:
4086 /* x > max is always false. */
4087 if (rtx_equal_p (trueop1, mmax))
4088 tem = const0_rtx;
4089 break;
4091 case LTU:
4092 case LT:
4093 /* x < min is always false. */
4094 if (rtx_equal_p (trueop1, mmin))
4095 tem = const0_rtx;
4096 break;
4098 default:
4099 break;
4101 if (tem == const0_rtx
4102 || tem == const_true_rtx)
4103 return tem;
4106 switch (code)
4108 case EQ:
4109 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4110 return const0_rtx;
4111 break;
4113 case NE:
4114 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4115 return const_true_rtx;
4116 break;
4118 case LT:
4119 /* Optimize abs(x) < 0.0. */
4120 if (trueop1 == CONST0_RTX (mode)
4121 && !HONOR_SNANS (mode)
4122 && (!INTEGRAL_MODE_P (mode)
4123 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4125 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4126 : trueop0;
4127 if (GET_CODE (tem) == ABS)
4129 if (INTEGRAL_MODE_P (mode)
4130 && (issue_strict_overflow_warning
4131 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4132 warning (OPT_Wstrict_overflow,
4133 ("assuming signed overflow does not occur when "
4134 "assuming abs (x) < 0 is false"));
4135 return const0_rtx;
4139 /* Optimize popcount (x) < 0. */
4140 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4141 return const0_rtx;
4142 break;
4144 case GE:
4145 /* Optimize abs(x) >= 0.0. */
4146 if (trueop1 == CONST0_RTX (mode)
4147 && !HONOR_NANS (mode)
4148 && (!INTEGRAL_MODE_P (mode)
4149 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4151 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4152 : trueop0;
4153 if (GET_CODE (tem) == ABS)
4155 if (INTEGRAL_MODE_P (mode)
4156 && (issue_strict_overflow_warning
4157 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4158 warning (OPT_Wstrict_overflow,
4159 ("assuming signed overflow does not occur when "
4160 "assuming abs (x) >= 0 is true"));
4161 return const_true_rtx;
4165 /* Optimize popcount (x) >= 0. */
4166 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4167 return const_true_rtx;
4168 break;
4170 case UNGE:
4171 /* Optimize ! (abs(x) < 0.0). */
4172 if (trueop1 == CONST0_RTX (mode))
4174 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4175 : trueop0;
4176 if (GET_CODE (tem) == ABS)
4177 return const_true_rtx;
4179 break;
4181 default:
4182 break;
4185 return 0;
4188 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4189 as appropriate. */
4190 switch (code)
4192 case EQ:
4193 case UNEQ:
4194 return equal ? const_true_rtx : const0_rtx;
4195 case NE:
4196 case LTGT:
4197 return ! equal ? const_true_rtx : const0_rtx;
4198 case LT:
4199 case UNLT:
4200 return op0lt ? const_true_rtx : const0_rtx;
4201 case GT:
4202 case UNGT:
4203 return op1lt ? const_true_rtx : const0_rtx;
4204 case LTU:
4205 return op0ltu ? const_true_rtx : const0_rtx;
4206 case GTU:
4207 return op1ltu ? const_true_rtx : const0_rtx;
4208 case LE:
4209 case UNLE:
4210 return equal || op0lt ? const_true_rtx : const0_rtx;
4211 case GE:
4212 case UNGE:
4213 return equal || op1lt ? const_true_rtx : const0_rtx;
4214 case LEU:
4215 return equal || op0ltu ? const_true_rtx : const0_rtx;
4216 case GEU:
4217 return equal || op1ltu ? const_true_rtx : const0_rtx;
4218 case ORDERED:
4219 return const_true_rtx;
4220 case UNORDERED:
4221 return const0_rtx;
4222 default:
4223 gcc_unreachable ();
4227 /* Simplify CODE, an operation with result mode MODE and three operands,
4228 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4229 a constant. Return 0 if no simplification is possible. */
4231 rtx
4232 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4233 enum machine_mode op0_mode, rtx op0, rtx op1,
4234 rtx op2)
4236 unsigned int width = GET_MODE_BITSIZE (mode);
4238 /* VOIDmode means "infinite" precision. */
4239 if (width == 0)
4240 width = HOST_BITS_PER_WIDE_INT;
4242 switch (code)
4244 case SIGN_EXTRACT:
4245 case ZERO_EXTRACT:
4246 if (GET_CODE (op0) == CONST_INT
4247 && GET_CODE (op1) == CONST_INT
4248 && GET_CODE (op2) == CONST_INT
4249 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4250 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4252 /* Extracting a bit-field from a constant */
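/* For example, (zero_extract:SI (const_int 0x1234) (const_int 4)
   (const_int 4)) selects bits 4..7 of 0x1234 and yields (const_int 3),
   assuming BITS_BIG_ENDIAN is 0. */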
4253 HOST_WIDE_INT val = INTVAL (op0);
4255 if (BITS_BIG_ENDIAN)
4256 val >>= (GET_MODE_BITSIZE (op0_mode)
4257 - INTVAL (op2) - INTVAL (op1));
4258 else
4259 val >>= INTVAL (op2);
4261 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4263 /* First zero-extend. */
4264 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4265 /* If desired, propagate sign bit. */
4266 if (code == SIGN_EXTRACT
4267 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4268 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4271 /* Clear the bits that don't belong in our mode,
4272 unless they and our sign bit are all one.
4273 So we get either a reasonable negative value or a reasonable
4274 unsigned value for this mode. */
4275 if (width < HOST_BITS_PER_WIDE_INT
4276 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4277 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4278 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4280 return gen_int_mode (val, mode);
4282 break;
4284 case IF_THEN_ELSE:
4285 if (GET_CODE (op0) == CONST_INT)
4286 return op0 != const0_rtx ? op1 : op2;
4288 /* Convert c ? a : a into "a". */
4289 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4290 return op1;
4292 /* Convert a != b ? a : b into "a". */
4293 if (GET_CODE (op0) == NE
4294 && ! side_effects_p (op0)
4295 && ! HONOR_NANS (mode)
4296 && ! HONOR_SIGNED_ZEROS (mode)
4297 && ((rtx_equal_p (XEXP (op0, 0), op1)
4298 && rtx_equal_p (XEXP (op0, 1), op2))
4299 || (rtx_equal_p (XEXP (op0, 0), op2)
4300 && rtx_equal_p (XEXP (op0, 1), op1))))
4301 return op1;
4303 /* Convert a == b ? a : b into "b". */
4304 if (GET_CODE (op0) == EQ
4305 && ! side_effects_p (op0)
4306 && ! HONOR_NANS (mode)
4307 && ! HONOR_SIGNED_ZEROS (mode)
4308 && ((rtx_equal_p (XEXP (op0, 0), op1)
4309 && rtx_equal_p (XEXP (op0, 1), op2))
4310 || (rtx_equal_p (XEXP (op0, 0), op2)
4311 && rtx_equal_p (XEXP (op0, 1), op1))))
4312 return op2;
4314 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4316 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4317 ? GET_MODE (XEXP (op0, 1))
4318 : GET_MODE (XEXP (op0, 0)));
4319 rtx temp;
4321 /* Look for happy constants in op1 and op2. */
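/* That is, with STORE_FLAG_VALUE == 1, (if_then_else (lt x y)
   (const_int 1) (const_int 0)) is just the comparison (lt x y), and
   with the arms swapped it becomes the reversed comparison (ge x y),
   when one can be computed. */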
4322 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4324 HOST_WIDE_INT t = INTVAL (op1);
4325 HOST_WIDE_INT f = INTVAL (op2);
4327 if (t == STORE_FLAG_VALUE && f == 0)
4328 code = GET_CODE (op0);
4329 else if (t == 0 && f == STORE_FLAG_VALUE)
4331 enum rtx_code tmp;
4332 tmp = reversed_comparison_code (op0, NULL_RTX);
4333 if (tmp == UNKNOWN)
4334 break;
4335 code = tmp;
4337 else
4338 break;
4340 return simplify_gen_relational (code, mode, cmp_mode,
4341 XEXP (op0, 0), XEXP (op0, 1));
4344 if (cmp_mode == VOIDmode)
4345 cmp_mode = op0_mode;
4346 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4347 cmp_mode, XEXP (op0, 0),
4348 XEXP (op0, 1));
4350 /* See if any simplifications were possible. */
4351 if (temp)
4353 if (GET_CODE (temp) == CONST_INT)
4354 return temp == const0_rtx ? op2 : op1;
4355 else
4356 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4359 break;
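/* For VEC_MERGE, OP2 is a bit mask: when bit I is set, element I of
   the result comes from OP0, otherwise from OP1. An all-zeros mask
   therefore selects OP1, an all-ones mask selects OP0, and two
   constant vectors can be merged element by element. */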
4361 case VEC_MERGE:
4362 gcc_assert (GET_MODE (op0) == mode);
4363 gcc_assert (GET_MODE (op1) == mode);
4364 gcc_assert (VECTOR_MODE_P (mode));
4365 op2 = avoid_constant_pool_reference (op2);
4366 if (GET_CODE (op2) == CONST_INT)
4368 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4369 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4370 int mask = (1 << n_elts) - 1;
4372 if (!(INTVAL (op2) & mask))
4373 return op1;
4374 if ((INTVAL (op2) & mask) == mask)
4375 return op0;
4377 op0 = avoid_constant_pool_reference (op0);
4378 op1 = avoid_constant_pool_reference (op1);
4379 if (GET_CODE (op0) == CONST_VECTOR
4380 && GET_CODE (op1) == CONST_VECTOR)
4382 rtvec v = rtvec_alloc (n_elts);
4383 unsigned int i;
4385 for (i = 0; i < n_elts; i++)
4386 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4387 ? CONST_VECTOR_ELT (op0, i)
4388 : CONST_VECTOR_ELT (op1, i));
4389 return gen_rtx_CONST_VECTOR (mode, v);
4392 break;
4394 default:
4395 gcc_unreachable ();
4398 return 0;
4401 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4402 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4404 Works by unpacking OP into a collection of 8-bit values
4405 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4406 and then repacking them again for OUTERMODE. */
4408 static rtx
4409 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4410 enum machine_mode innermode, unsigned int byte)
4412 /* We support up to 512-bit values (for V8DFmode). */
4413 enum {
4414 max_bitsize = 512,
4415 value_bit = 8,
4416 value_mask = (1 << value_bit) - 1
4418 unsigned char value[max_bitsize / value_bit];
4419 int value_start;
4420 int i;
4421 int elem;
4423 int num_elem;
4424 rtx * elems;
4425 int elem_bitsize;
4426 rtx result_s;
4427 rtvec result_v = NULL;
4428 enum mode_class outer_class;
4429 enum machine_mode outer_submode;
4431 /* Some ports misuse CCmode. */
4432 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4433 return op;
4435 /* We have no way to represent a complex constant at the rtl level. */
4436 if (COMPLEX_MODE_P (outermode))
4437 return NULL_RTX;
4439 /* Unpack the value. */
4441 if (GET_CODE (op) == CONST_VECTOR)
4443 num_elem = CONST_VECTOR_NUNITS (op);
4444 elems = &CONST_VECTOR_ELT (op, 0);
4445 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4447 else
4449 num_elem = 1;
4450 elems = &op;
4451 elem_bitsize = max_bitsize;
4453 /* If this asserts, it is too complicated; reducing value_bit may help. */
4454 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4455 /* I don't know how to handle endianness of sub-units. */
4456 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4458 for (elem = 0; elem < num_elem; elem++)
4460 unsigned char * vp;
4461 rtx el = elems[elem];
4463 /* Vectors are kept in target memory order. (This is probably
4464 a mistake.) */
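/* Work out where this element's bytes start within VALUE, translating
   the element index from target memory order (possibly big-endian by
   words and/or by bytes) into the little-endian layout used here. */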
4466 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4467 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4468 / BITS_PER_UNIT);
4469 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4470 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4471 unsigned bytele = (subword_byte % UNITS_PER_WORD
4472 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4473 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4476 switch (GET_CODE (el))
4478 case CONST_INT:
4479 for (i = 0;
4480 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4481 i += value_bit)
4482 *vp++ = INTVAL (el) >> i;
4483 /* CONST_INTs are always logically sign-extended. */
4484 for (; i < elem_bitsize; i += value_bit)
4485 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4486 break;
4488 case CONST_DOUBLE:
4489 if (GET_MODE (el) == VOIDmode)
4491 /* If this triggers, someone should have generated a
4492 CONST_INT instead. */
4493 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4495 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4496 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4497 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4499 *vp++
4500 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4501 i += value_bit;
4503 /* It shouldn't matter what's done here, so fill it with
4504 zero. */
4505 for (; i < elem_bitsize; i += value_bit)
4506 *vp++ = 0;
4508 else
4510 long tmp[max_bitsize / 32];
4511 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4513 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4514 gcc_assert (bitsize <= elem_bitsize);
4515 gcc_assert (bitsize % value_bit == 0);
4517 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4518 GET_MODE (el));
4520 /* real_to_target produces its result in words affected by
4521 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4522 and use WORDS_BIG_ENDIAN instead; see the documentation
4523 of SUBREG in rtl.texi. */
4524 for (i = 0; i < bitsize; i += value_bit)
4526 int ibase;
4527 if (WORDS_BIG_ENDIAN)
4528 ibase = bitsize - 1 - i;
4529 else
4530 ibase = i;
4531 *vp++ = tmp[ibase / 32] >> i % 32;
4534 /* It shouldn't matter what's done here, so fill it with
4535 zero. */
4536 for (; i < elem_bitsize; i += value_bit)
4537 *vp++ = 0;
4539 break;
4541 default:
4542 gcc_unreachable ();
4546 /* Now, pick the right byte to start with. */
4547 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4548 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4549 will already have offset 0. */
4550 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4552 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4553 - byte);
4554 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4555 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4556 byte = (subword_byte % UNITS_PER_WORD
4557 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4560 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4561 so if it's become negative it will instead be very large.) */
4562 gcc_assert (byte < GET_MODE_SIZE (innermode));
4564 /* Convert from bytes to chunks of size value_bit. */
4565 value_start = byte * (BITS_PER_UNIT / value_bit);
4567 /* Re-pack the value. */
4569 if (VECTOR_MODE_P (outermode))
4571 num_elem = GET_MODE_NUNITS (outermode);
4572 result_v = rtvec_alloc (num_elem);
4573 elems = &RTVEC_ELT (result_v, 0);
4574 outer_submode = GET_MODE_INNER (outermode);
4576 else
4578 num_elem = 1;
4579 elems = &result_s;
4580 outer_submode = outermode;
4583 outer_class = GET_MODE_CLASS (outer_submode);
4584 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4586 gcc_assert (elem_bitsize % value_bit == 0);
4587 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4589 for (elem = 0; elem < num_elem; elem++)
4591 unsigned char *vp;
4593 /* Vectors are stored in target memory order. (This is probably
4594 a mistake.) */
4596 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4597 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4598 / BITS_PER_UNIT);
4599 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4600 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4601 unsigned bytele = (subword_byte % UNITS_PER_WORD
4602 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4603 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4606 switch (outer_class)
4608 case MODE_INT:
4609 case MODE_PARTIAL_INT:
4611 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4613 for (i = 0;
4614 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4615 i += value_bit)
4616 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4617 for (; i < elem_bitsize; i += value_bit)
4618 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4619 << (i - HOST_BITS_PER_WIDE_INT));
4621 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4622 know why. */
4623 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4624 elems[elem] = gen_int_mode (lo, outer_submode);
4625 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4626 elems[elem] = immed_double_const (lo, hi, outer_submode);
4627 else
4628 return NULL_RTX;
4630 break;
4632 case MODE_FLOAT:
4633 case MODE_DECIMAL_FLOAT:
4635 REAL_VALUE_TYPE r;
4636 long tmp[max_bitsize / 32];
4638 /* real_from_target wants its input in words affected by
4639 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4640 and use WORDS_BIG_ENDIAN instead; see the documentation
4641 of SUBREG in rtl.texi. */
4642 for (i = 0; i < max_bitsize / 32; i++)
4643 tmp[i] = 0;
4644 for (i = 0; i < elem_bitsize; i += value_bit)
4646 int ibase;
4647 if (WORDS_BIG_ENDIAN)
4648 ibase = elem_bitsize - 1 - i;
4649 else
4650 ibase = i;
4651 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4654 real_from_target (&r, tmp, outer_submode);
4655 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4657 break;
4659 default:
4660 gcc_unreachable ();
4663 if (VECTOR_MODE_P (outermode))
4664 return gen_rtx_CONST_VECTOR (outermode, result_v);
4665 else
4666 return result_s;
4669 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4670 Return 0 if no simplifications are possible. */
4671 rtx
4672 simplify_subreg (enum machine_mode outermode, rtx op,
4673 enum machine_mode innermode, unsigned int byte)
4675 /* Little bit of sanity checking. */
4676 gcc_assert (innermode != VOIDmode);
4677 gcc_assert (outermode != VOIDmode);
4678 gcc_assert (innermode != BLKmode);
4679 gcc_assert (outermode != BLKmode);
4681 gcc_assert (GET_MODE (op) == innermode
4682 || GET_MODE (op) == VOIDmode);
4684 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4685 gcc_assert (byte < GET_MODE_SIZE (innermode));
4687 if (outermode == innermode && !byte)
4688 return op;
4690 if (GET_CODE (op) == CONST_INT
4691 || GET_CODE (op) == CONST_DOUBLE
4692 || GET_CODE (op) == CONST_VECTOR)
4693 return simplify_immed_subreg (outermode, op, innermode, byte);
4695 /* Changing mode twice with SUBREG => just change it once,
4696 or not at all if changing back to OP's starting mode. */
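/* For example, on a little-endian target (subreg:HI (subreg:SI
   (reg:DI R) 0) 0) can be collapsed to (subreg:HI (reg:DI R) 0),
   provided the combined offset is representable. */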
4697 if (GET_CODE (op) == SUBREG)
4699 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4700 int final_offset = byte + SUBREG_BYTE (op);
4701 rtx newx;
4703 if (outermode == innermostmode
4704 && byte == 0 && SUBREG_BYTE (op) == 0)
4705 return SUBREG_REG (op);
4707 /* The SUBREG_BYTE represents the offset, as if the value were stored
4708 in memory. An irritating exception is a paradoxical subreg, where
4709 we define SUBREG_BYTE to be 0; on big-endian machines this
4710 value should really be negative. For a moment, undo this exception. */
4711 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4713 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4714 if (WORDS_BIG_ENDIAN)
4715 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4716 if (BYTES_BIG_ENDIAN)
4717 final_offset += difference % UNITS_PER_WORD;
4719 if (SUBREG_BYTE (op) == 0
4720 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4722 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4723 if (WORDS_BIG_ENDIAN)
4724 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4725 if (BYTES_BIG_ENDIAN)
4726 final_offset += difference % UNITS_PER_WORD;
4729 /* See whether resulting subreg will be paradoxical. */
4730 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4732 /* In nonparadoxical subregs we can't handle negative offsets. */
4733 if (final_offset < 0)
4734 return NULL_RTX;
4735 /* Bail out in case resulting subreg would be incorrect. */
4736 if (final_offset % GET_MODE_SIZE (outermode)
4737 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4738 return NULL_RTX;
4740 else
4742 int offset = 0;
4743 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4745 /* In a paradoxical subreg, see if we are still looking at the lower part.
4746 If so, our SUBREG_BYTE will be 0. */
4747 if (WORDS_BIG_ENDIAN)
4748 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4749 if (BYTES_BIG_ENDIAN)
4750 offset += difference % UNITS_PER_WORD;
4751 if (offset == final_offset)
4752 final_offset = 0;
4753 else
4754 return NULL_RTX;
4757 /* Recurse for further possible simplifications. */
4758 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4759 final_offset);
4760 if (newx)
4761 return newx;
4762 if (validate_subreg (outermode, innermostmode,
4763 SUBREG_REG (op), final_offset))
4764 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4765 return NULL_RTX;
4768 /* Merge implicit and explicit truncations. */
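/* For example, taking the lowpart of a wider truncation, as in
   (subreg:QI (truncate:HI (reg:SI R)) 0) on a little-endian target,
   simplifies to the single truncation (truncate:QI (reg:SI R)). */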
4770 if (GET_CODE (op) == TRUNCATE
4771 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4772 && subreg_lowpart_offset (outermode, innermode) == byte)
4773 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4774 GET_MODE (XEXP (op, 0)));
4776 /* SUBREG of a hard register => just change the register number
4777 and/or mode. If the hard register is not valid in that mode,
4778 suppress this simplification. If the hard register is the stack,
4779 frame, or argument pointer, leave this as a SUBREG. */
4781 if (REG_P (op)
4782 && REGNO (op) < FIRST_PSEUDO_REGISTER
4783 #ifdef CANNOT_CHANGE_MODE_CLASS
4784 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4785 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4786 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4787 #endif
4788 && ((reload_completed && !frame_pointer_needed)
4789 || (REGNO (op) != FRAME_POINTER_REGNUM
4790 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4791 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4792 #endif
4794 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4795 && REGNO (op) != ARG_POINTER_REGNUM
4796 #endif
4797 && REGNO (op) != STACK_POINTER_REGNUM
4798 && subreg_offset_representable_p (REGNO (op), innermode,
4799 byte, outermode))
4801 unsigned int regno = REGNO (op);
4802 unsigned int final_regno
4803 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4805 /* ??? We do allow it if the current REG is not valid for
4806 its mode. This is a kludge to work around how float/complex
4807 arguments are passed on 32-bit SPARC and should be fixed. */
4808 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4809 || ! HARD_REGNO_MODE_OK (regno, innermode))
4811 rtx x;
4812 int final_offset = byte;
4814 /* Adjust offset for paradoxical subregs. */
4815 if (byte == 0
4816 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4818 int difference = (GET_MODE_SIZE (innermode)
4819 - GET_MODE_SIZE (outermode));
4820 if (WORDS_BIG_ENDIAN)
4821 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4822 if (BYTES_BIG_ENDIAN)
4823 final_offset += difference % UNITS_PER_WORD;
4826 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4828 /* Propagate the original regno. We don't have any way to specify
4829 the offset inside the original regno, so do so only for the lowpart.
4830 The information is used only by alias analysis, which cannot
4831 grok partial registers anyway. */
4833 if (subreg_lowpart_offset (outermode, innermode) == byte)
4834 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4835 return x;
4839 /* If we have a SUBREG of a register that we are replacing and we are
4840 replacing it with a MEM, make a new MEM and try replacing the
4841 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4842 or if we would be widening it. */
4844 if (MEM_P (op)
4845 && ! mode_dependent_address_p (XEXP (op, 0))
4846 /* Allow splitting of volatile memory references in case we don't
4847 have an instruction to move the whole thing. */
4848 && (! MEM_VOLATILE_P (op)
4849 || ! have_insn_for (SET, innermode))
4850 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4851 return adjust_address_nv (op, outermode, byte);
4853 /* Handle complex values represented as CONCAT
4854 of real and imaginary part. */
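/* For example, with 4-byte SFmode, (subreg:SF (concat:SC A B) 0)
   picks out the real part A and (subreg:SF (concat:SC A B) 4) the
   imaginary part B. */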
4855 if (GET_CODE (op) == CONCAT)
4857 unsigned int part_size, final_offset;
4858 rtx part, res;
4860 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4861 if (byte < part_size)
4863 part = XEXP (op, 0);
4864 final_offset = byte;
4866 else
4868 part = XEXP (op, 1);
4869 final_offset = byte - part_size;
4872 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4873 return NULL_RTX;
4875 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4876 if (res)
4877 return res;
4878 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4879 return gen_rtx_SUBREG (outermode, part, final_offset);
4880 return NULL_RTX;
4883 /* Optimize SUBREG truncations of zero and sign extended values. */
4884 if ((GET_CODE (op) == ZERO_EXTEND
4885 || GET_CODE (op) == SIGN_EXTEND)
4886 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4888 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4890 /* If we're requesting the lowpart of a zero or sign extension,
4891 there are three possibilities. If the outermode is the same
4892 as the origmode, we can omit both the extension and the subreg.
4893 If the outermode is not larger than the origmode, we can apply
4894 the truncation without the extension. Finally, if the outermode
4895 is larger than the origmode, but both are integer modes, we
4896 can just extend to the appropriate mode. */
4897 if (bitpos == 0)
4899 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4900 if (outermode == origmode)
4901 return XEXP (op, 0);
4902 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4903 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4904 subreg_lowpart_offset (outermode,
4905 origmode));
4906 if (SCALAR_INT_MODE_P (outermode))
4907 return simplify_gen_unary (GET_CODE (op), outermode,
4908 XEXP (op, 0), origmode);
4911 /* A SUBREG resulting from a zero extension may fold to zero if
4912 it extracts higher bits than the ZERO_EXTEND's source provides. */
4913 if (GET_CODE (op) == ZERO_EXTEND
4914 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4915 return CONST0_RTX (outermode);
4918 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4919 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4920 the outer subreg is effectively a truncation to the original mode. */
4921 if ((GET_CODE (op) == LSHIFTRT
4922 || GET_CODE (op) == ASHIFTRT)
4923 && SCALAR_INT_MODE_P (outermode)
4924 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4925 to avoid the possibility that an outer LSHIFTRT shifts by more
4926 than the sign extension's sign_bit_copies and introduces zeros
4927 into the high bits of the result. */
4928 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4929 && GET_CODE (XEXP (op, 1)) == CONST_INT
4930 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4931 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4932 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4933 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4934 return simplify_gen_binary (ASHIFTRT, outermode,
4935 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4937 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4938 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4939 the outer subreg is effectively a truncation to the original mode. */
4940 if ((GET_CODE (op) == LSHIFTRT
4941 || GET_CODE (op) == ASHIFTRT)
4942 && SCALAR_INT_MODE_P (outermode)
4943 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4944 && GET_CODE (XEXP (op, 1)) == CONST_INT
4945 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4946 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4947 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4948 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4949 return simplify_gen_binary (LSHIFTRT, outermode,
4950 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4952 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4953 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4954 the outer subreg is effectively a truncation to the original mode. */
4955 if (GET_CODE (op) == ASHIFT
4956 && SCALAR_INT_MODE_P (outermode)
4957 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4958 && GET_CODE (XEXP (op, 1)) == CONST_INT
4959 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4960 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4961 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4962 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4963 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4964 return simplify_gen_binary (ASHIFT, outermode,
4965 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4967 return NULL_RTX;
4970 /* Make a SUBREG operation or equivalent if it folds. */
4972 rtx
4973 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4974 enum machine_mode innermode, unsigned int byte)
4976 rtx newx;
4978 newx = simplify_subreg (outermode, op, innermode, byte);
4979 if (newx)
4980 return newx;
4982 if (GET_CODE (op) == SUBREG
4983 || GET_CODE (op) == CONCAT
4984 || GET_MODE (op) == VOIDmode)
4985 return NULL_RTX;
4987 if (validate_subreg (outermode, innermode, op, byte))
4988 return gen_rtx_SUBREG (outermode, op, byte);
4990 return NULL_RTX;
4993 /* Simplify X, an rtx expression.
4995 Return the simplified expression or NULL if no simplifications
4996 were possible.
4998 This is the preferred entry point into the simplification routines;
4999 however, we still allow passes to call the more specific routines.
5001 Right now GCC has three (yes, three) major bodies of RTL simplification
5002 code that need to be unified.
5004 1. fold_rtx in cse.c. This code uses various CSE specific
5005 information to aid in RTL simplification.
5007 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5008 it uses combine specific information to aid in RTL
5009 simplification.
5011 3. The routines in this file.
5014 Long term we want to only have one body of simplification code; to
5015 get to that state I recommend the following steps:
5017 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5018 which do not depend on pass-specific state into these routines.
5020 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5021 use this routine whenever possible.
5023 3. Allow for pass dependent state to be provided to these
5024 routines and add simplifications based on the pass dependent
5025 state. Remove code from cse.c & combine.c that becomes
5026 redundant/dead.
5028 It will take time, but ultimately the compiler will be easier to
5029 maintain and improve. It's totally silly that when we add a
5030 simplification it needs to be added to 4 places (3 for RTL
5031 simplification and 1 for tree simplification). */
5033 rtx
5034 simplify_rtx (rtx x)
5036 enum rtx_code code = GET_CODE (x);
5037 enum machine_mode mode = GET_MODE (x);
5039 switch (GET_RTX_CLASS (code))
5041 case RTX_UNARY:
5042 return simplify_unary_operation (code, mode,
5043 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5044 case RTX_COMM_ARITH:
5045 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5046 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5048 /* Fall through.... */
5050 case RTX_BIN_ARITH:
5051 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5053 case RTX_TERNARY:
5054 case RTX_BITFIELD_OPS:
5055 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5056 XEXP (x, 0), XEXP (x, 1),
5057 XEXP (x, 2));
5059 case RTX_COMPARE:
5060 case RTX_COMM_COMPARE:
5061 return simplify_relational_operation (code, mode,
5062 ((GET_MODE (XEXP (x, 0))
5063 != VOIDmode)
5064 ? GET_MODE (XEXP (x, 0))
5065 : GET_MODE (XEXP (x, 1))),
5066 XEXP (x, 0),
5067 XEXP (x, 1));
5069 case RTX_EXTRA:
5070 if (code == SUBREG)
5071 return simplify_subreg (mode, SUBREG_REG (x),
5072 GET_MODE (SUBREG_REG (x)),
5073 SUBREG_BYTE (x));
5074 break;
5076 case RTX_OBJ:
5077 if (code == LO_SUM)
5079 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5080 if (GET_CODE (XEXP (x, 0)) == HIGH
5081 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5082 return XEXP (x, 1);
5084 break;
5086 default:
5087 break;
5089 return NULL;