1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
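/* Illustrative example (not part of the original file): with a 64-bit
   HOST_WIDE_INT, a low word of 0x0000000000000001 sign-extends to a high
   word of 0, while a low word of 0x8000000000000000 (top bit set)
   sign-extends to a high word of (HOST_WIDE_INT) -1, i.e. all ones.  */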
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
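/* Illustrative example (not part of the original file): for 32-bit SImode,
   only the constant whose sole set bit is bit 31 qualifies; on a 64-bit
   host that is (const_int -2147483648), i.e. 0x80000000 after masking to
   the mode, and every other constant makes this return false.  */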
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
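/* Usage sketch (illustrative, not part of the original file): callers build
   RTL through simplify_gen_binary so that trivial forms fold immediately.
   The register number 100 below is arbitrary.  */
#if 0
static rtx
example_fold_plus_zero (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  /* (plus:SI (reg:SI 100) (const_int 0)) folds straight back to the
     register, so no PLUS rtx is allocated.  */
  return simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
}
#endif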
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
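/* Illustrative example (not part of the original file): a MEM whose address
   is a SYMBOL_REF into the constant pool, say a DFmode load of a
   floating-point literal, is replaced by the CONST_DOUBLE stored in the
   pool (adjusted via simplify_subreg if the offset or mode differ); any
   other rtx comes back unchanged.  */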
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
210 delegitimize_mem_from_attrs (rtx x)
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
271 newx = DECL_RTL (decl);
273 if (MEM_P (newx))
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do so if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
302 return x;
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
312 rtx tem;
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
318 return gen_rtx_fmt_e (code, mode, op);
321 /* Likewise for ternary operations. */
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
327 rtx tem;
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
344 rtx tem;
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
353 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
354 resulting RTX. Return a new RTX which is as simplified as possible. */
357 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
359 enum rtx_code code = GET_CODE (x);
360 enum machine_mode mode = GET_MODE (x);
361 enum machine_mode op_mode;
362 rtx op0, op1, op2;
364 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
365 to build a new expression substituting recursively. If we can't do
366 anything, return our input. */
368 if (x == old_rtx)
369 return new_rtx;
371 switch (GET_RTX_CLASS (code))
373 case RTX_UNARY:
374 op0 = XEXP (x, 0);
375 op_mode = GET_MODE (op0);
376 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
377 if (op0 == XEXP (x, 0))
378 return x;
379 return simplify_gen_unary (code, mode, op0, op_mode);
381 case RTX_BIN_ARITH:
382 case RTX_COMM_ARITH:
383 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
384 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
385 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
386 return x;
387 return simplify_gen_binary (code, mode, op0, op1);
389 case RTX_COMPARE:
390 case RTX_COMM_COMPARE:
391 op0 = XEXP (x, 0);
392 op1 = XEXP (x, 1);
393 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
394 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
395 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
396 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
397 return x;
398 return simplify_gen_relational (code, mode, op_mode, op0, op1);
400 case RTX_TERNARY:
401 case RTX_BITFIELD_OPS:
402 op0 = XEXP (x, 0);
403 op_mode = GET_MODE (op0);
404 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
405 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
406 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
407 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
408 return x;
409 if (op_mode == VOIDmode)
410 op_mode = GET_MODE (op0);
411 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
413 case RTX_EXTRA:
414 /* The only case we try to handle is a SUBREG. */
415 if (code == SUBREG)
417 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
418 if (op0 == SUBREG_REG (x))
419 return x;
420 op0 = simplify_gen_subreg (GET_MODE (x), op0,
421 GET_MODE (SUBREG_REG (x)),
422 SUBREG_BYTE (x));
423 return op0 ? op0 : x;
425 break;
427 case RTX_OBJ:
428 if (code == MEM)
430 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
431 if (op0 == XEXP (x, 0))
432 return x;
433 return replace_equiv_address_nv (x, op0);
435 else if (code == LO_SUM)
437 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
438 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
440 /* (lo_sum (high x) x) -> x */
441 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
442 return op1;
444 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
445 return x;
446 return gen_rtx_LO_SUM (mode, op0, op1);
448 else if (code == REG)
450 if (rtx_equal_p (x, old_rtx))
451 return new_rtx;
453 break;
455 default:
456 break;
458 return x;
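/* Usage sketch (illustrative, not part of the original file): replacing a
   register with a constant lets the rebuilt expression fold.  The register
   number 101 is arbitrary.  */
#if 0
static rtx
example_replace_and_fold (void)
{
  rtx reg = gen_rtx_REG (SImode, 101);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (3));
  /* Substituting (const_int 2) for the register rebuilds the PLUS via
     simplify_gen_binary, which folds it to (const_int 5).  */
  return simplify_replace_rtx (x, reg, GEN_INT (2));
}
#endif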
461 /* Try to simplify a unary operation CODE whose output mode is to be
462 MODE with input operand OP whose mode was originally OP_MODE.
463 Return zero if no simplification can be made. */
465 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
466 rtx op, enum machine_mode op_mode)
468 rtx trueop, tem;
470 if (GET_CODE (op) == CONST)
471 op = XEXP (op, 0);
473 trueop = avoid_constant_pool_reference (op);
475 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
476 if (tem)
477 return tem;
479 return simplify_unary_operation_1 (code, mode, op);
482 /* Perform some simplifications we can do even if the operands
483 aren't constant. */
484 static rtx
485 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
487 enum rtx_code reversed;
488 rtx temp;
490 switch (code)
492 case NOT:
493 /* (not (not X)) == X. */
494 if (GET_CODE (op) == NOT)
495 return XEXP (op, 0);
497 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
498 comparison is all ones. */
499 if (COMPARISON_P (op)
500 && (mode == BImode || STORE_FLAG_VALUE == -1)
501 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
502 return simplify_gen_relational (reversed, mode, VOIDmode,
503 XEXP (op, 0), XEXP (op, 1));
505 /* (not (plus X -1)) can become (neg X). */
506 if (GET_CODE (op) == PLUS
507 && XEXP (op, 1) == constm1_rtx)
508 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
510 /* Similarly, (not (neg X)) is (plus X -1). */
511 if (GET_CODE (op) == NEG)
512 return plus_constant (XEXP (op, 0), -1);
514 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
515 if (GET_CODE (op) == XOR
516 && CONST_INT_P (XEXP (op, 1))
517 && (temp = simplify_unary_operation (NOT, mode,
518 XEXP (op, 1), mode)) != 0)
519 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
521 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
522 if (GET_CODE (op) == PLUS
523 && CONST_INT_P (XEXP (op, 1))
524 && mode_signbit_p (mode, XEXP (op, 1))
525 && (temp = simplify_unary_operation (NOT, mode,
526 XEXP (op, 1), mode)) != 0)
527 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
530 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
531 operands other than 1, but that is not valid. We could do a
532 similar simplification for (not (lshiftrt C X)) where C is
533 just the sign bit, but this doesn't seem common enough to
534 bother with. */
535 if (GET_CODE (op) == ASHIFT
536 && XEXP (op, 0) == const1_rtx)
538 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
539 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
542 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
543 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
544 so we can perform the above simplification. */
546 if (STORE_FLAG_VALUE == -1
547 && GET_CODE (op) == ASHIFTRT
548 && CONST_INT_P (XEXP (op, 1))
549 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
550 return simplify_gen_relational (GE, mode, VOIDmode,
551 XEXP (op, 0), const0_rtx);
554 if (GET_CODE (op) == SUBREG
555 && subreg_lowpart_p (op)
556 && (GET_MODE_SIZE (GET_MODE (op))
557 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
558 && GET_CODE (SUBREG_REG (op)) == ASHIFT
559 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
561 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
562 rtx x;
564 x = gen_rtx_ROTATE (inner_mode,
565 simplify_gen_unary (NOT, inner_mode, const1_rtx,
566 inner_mode),
567 XEXP (SUBREG_REG (op), 1));
568 return rtl_hooks.gen_lowpart_no_emit (mode, x);
571 /* Apply De Morgan's laws to reduce number of patterns for machines
572 with negating logical insns (and-not, nand, etc.). If result has
573 only one NOT, put it first, since that is how the patterns are
574 coded. */
576 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
578 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
579 enum machine_mode op_mode;
581 op_mode = GET_MODE (in1);
582 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
584 op_mode = GET_MODE (in2);
585 if (op_mode == VOIDmode)
586 op_mode = mode;
587 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
589 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
591 rtx tem = in2;
592 in2 = in1; in1 = tem;
595 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
596 mode, in1, in2);
598 break;
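      /* Illustrative example (not part of the original file): by the
         De Morgan rewrite above, (not:SI (and:SI (reg:SI 1) (reg:SI 2)))
         becomes (ior:SI (not:SI (reg:SI 1)) (not:SI (reg:SI 2))), and a NOT
         of an IOR likewise becomes an AND of NOTs, with a single surviving
         NOT placed first.  */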
600 case NEG:
601 /* (neg (neg X)) == X. */
602 if (GET_CODE (op) == NEG)
603 return XEXP (op, 0);
605 /* (neg (plus X 1)) can become (not X). */
606 if (GET_CODE (op) == PLUS
607 && XEXP (op, 1) == const1_rtx)
608 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
610 /* Similarly, (neg (not X)) is (plus X 1). */
611 if (GET_CODE (op) == NOT)
612 return plus_constant (XEXP (op, 0), 1);
614 /* (neg (minus X Y)) can become (minus Y X). This transformation
615 isn't safe for modes with signed zeros, since if X and Y are
616 both +0, (minus Y X) is the same as (minus X Y). If the
617 rounding mode is towards +infinity (or -infinity) then the two
618 expressions will be rounded differently. */
619 if (GET_CODE (op) == MINUS
620 && !HONOR_SIGNED_ZEROS (mode)
621 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
622 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
624 if (GET_CODE (op) == PLUS
625 && !HONOR_SIGNED_ZEROS (mode)
626 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
628 /* (neg (plus A C)) is simplified to (minus -C A). */
629 if (CONST_INT_P (XEXP (op, 1))
630 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
632 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
633 if (temp)
634 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
637 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
638 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
639 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
642 /* (neg (mult A B)) becomes (mult (neg A) B).
643 This works even for floating-point values. */
644 if (GET_CODE (op) == MULT
645 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
647 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
648 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
651 /* NEG commutes with ASHIFT since it is multiplication. Only do
652 this if we can then eliminate the NEG (e.g., if the operand
653 is a constant). */
654 if (GET_CODE (op) == ASHIFT)
656 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
657 if (temp)
658 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
661 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
662 C is equal to the width of MODE minus 1. */
663 if (GET_CODE (op) == ASHIFTRT
664 && CONST_INT_P (XEXP (op, 1))
665 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
666 return simplify_gen_binary (LSHIFTRT, mode,
667 XEXP (op, 0), XEXP (op, 1));
669 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
670 C is equal to the width of MODE minus 1. */
671 if (GET_CODE (op) == LSHIFTRT
672 && CONST_INT_P (XEXP (op, 1))
673 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
674 return simplify_gen_binary (ASHIFTRT, mode,
675 XEXP (op, 0), XEXP (op, 1));
677 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
678 if (GET_CODE (op) == XOR
679 && XEXP (op, 1) == const1_rtx
680 && nonzero_bits (XEXP (op, 0), mode) == 1)
681 return plus_constant (XEXP (op, 0), -1);
683 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
684 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
685 if (GET_CODE (op) == LT
686 && XEXP (op, 1) == const0_rtx
687 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
689 enum machine_mode inner = GET_MODE (XEXP (op, 0));
690 int isize = GET_MODE_BITSIZE (inner);
691 if (STORE_FLAG_VALUE == 1)
693 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
694 GEN_INT (isize - 1));
695 if (mode == inner)
696 return temp;
697 if (GET_MODE_BITSIZE (mode) > isize)
698 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
699 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
701 else if (STORE_FLAG_VALUE == -1)
703 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
704 GEN_INT (isize - 1));
705 if (mode == inner)
706 return temp;
707 if (GET_MODE_BITSIZE (mode) > isize)
708 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
709 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
712 break;
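      /* Illustrative example (not part of the original file): with
         STORE_FLAG_VALUE == 1 and a 32-bit SImode operand, the case above
         turns (neg:SI (lt:SI (reg:SI 1) (const_int 0))) into
         (ashiftrt:SI (reg:SI 1) (const_int 31)), which is -1 when the
         register is negative and 0 otherwise.  */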
714 case TRUNCATE:
715 /* We can't handle truncation to a partial integer mode here
716 because we don't know the real bitsize of the partial
717 integer mode. */
718 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
719 break;
721 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
722 if ((GET_CODE (op) == SIGN_EXTEND
723 || GET_CODE (op) == ZERO_EXTEND)
724 && GET_MODE (XEXP (op, 0)) == mode)
725 return XEXP (op, 0);
727 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
728 (OP:SI foo:SI) if OP is NEG or ABS. */
729 if ((GET_CODE (op) == ABS
730 || GET_CODE (op) == NEG)
731 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
732 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
733 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
734 return simplify_gen_unary (GET_CODE (op), mode,
735 XEXP (XEXP (op, 0), 0), mode);
737 /* (truncate:A (subreg:B (truncate:C X) 0)) is
738 (truncate:A X). */
739 if (GET_CODE (op) == SUBREG
740 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
741 && subreg_lowpart_p (op))
742 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
743 GET_MODE (XEXP (SUBREG_REG (op), 0)));
745 /* If we know that the value is already truncated, we can
746 replace the TRUNCATE with a SUBREG. Note that this is also
747 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
748 modes; we just have to apply a different definition of
749 truncation. But don't do this for an (LSHIFTRT (MULT ...))
750 since this will cause problems with the umulXi3_highpart
751 patterns. */
752 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
753 GET_MODE_BITSIZE (GET_MODE (op)))
754 ? (num_sign_bit_copies (op, GET_MODE (op))
755 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
756 - GET_MODE_BITSIZE (mode)))
757 : truncated_to_mode (mode, op))
758 && ! (GET_CODE (op) == LSHIFTRT
759 && GET_CODE (XEXP (op, 0)) == MULT))
760 return rtl_hooks.gen_lowpart_no_emit (mode, op);
762 /* A truncate of a comparison can be replaced with a subreg if
763 STORE_FLAG_VALUE permits. This is like the previous test,
764 but it works even if the comparison is done in a mode larger
765 than HOST_BITS_PER_WIDE_INT. */
766 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
767 && COMPARISON_P (op)
768 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
769 return rtl_hooks.gen_lowpart_no_emit (mode, op);
770 break;
772 case FLOAT_TRUNCATE:
773 if (DECIMAL_FLOAT_MODE_P (mode))
774 break;
776 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
777 if (GET_CODE (op) == FLOAT_EXTEND
778 && GET_MODE (XEXP (op, 0)) == mode)
779 return XEXP (op, 0);
781 /* (float_truncate:SF (float_truncate:DF foo:XF))
782 = (float_truncate:SF foo:XF).
783 This may eliminate double rounding, so it is unsafe.
785 (float_truncate:SF (float_extend:XF foo:DF))
786 = (float_truncate:SF foo:DF).
788 (float_truncate:DF (float_extend:XF foo:SF))
789 = (float_extend:DF foo:SF). */
790 if ((GET_CODE (op) == FLOAT_TRUNCATE
791 && flag_unsafe_math_optimizations)
792 || GET_CODE (op) == FLOAT_EXTEND)
793 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
794 0)))
795 > GET_MODE_SIZE (mode)
796 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
797 mode,
798 XEXP (op, 0), mode);
800 /* (float_truncate (float x)) is (float x) */
801 if (GET_CODE (op) == FLOAT
802 && (flag_unsafe_math_optimizations
803 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
804 && ((unsigned)significand_size (GET_MODE (op))
805 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
806 - num_sign_bit_copies (XEXP (op, 0),
807 GET_MODE (XEXP (op, 0))))))))
808 return simplify_gen_unary (FLOAT, mode,
809 XEXP (op, 0),
810 GET_MODE (XEXP (op, 0)));
812 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
813 (OP:SF foo:SF) if OP is NEG or ABS. */
814 if ((GET_CODE (op) == ABS
815 || GET_CODE (op) == NEG)
816 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
817 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
818 return simplify_gen_unary (GET_CODE (op), mode,
819 XEXP (XEXP (op, 0), 0), mode);
821 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
822 is (float_truncate:SF x). */
823 if (GET_CODE (op) == SUBREG
824 && subreg_lowpart_p (op)
825 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
826 return SUBREG_REG (op);
827 break;
829 case FLOAT_EXTEND:
830 if (DECIMAL_FLOAT_MODE_P (mode))
831 break;
833 /* (float_extend (float_extend x)) is (float_extend x)
835 (float_extend (float x)) is (float x) assuming that double
836 rounding can't happen.  */
838 if (GET_CODE (op) == FLOAT_EXTEND
839 || (GET_CODE (op) == FLOAT
840 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
841 && ((unsigned)significand_size (GET_MODE (op))
842 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
843 - num_sign_bit_copies (XEXP (op, 0),
844 GET_MODE (XEXP (op, 0)))))))
845 return simplify_gen_unary (GET_CODE (op), mode,
846 XEXP (op, 0),
847 GET_MODE (XEXP (op, 0)));
849 break;
851 case ABS:
852 /* (abs (neg <foo>)) -> (abs <foo>) */
853 if (GET_CODE (op) == NEG)
854 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
855 GET_MODE (XEXP (op, 0)));
857 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
858 do nothing. */
859 if (GET_MODE (op) == VOIDmode)
860 break;
862 /* If operand is something known to be positive, ignore the ABS. */
863 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
864 || ((GET_MODE_BITSIZE (GET_MODE (op))
865 <= HOST_BITS_PER_WIDE_INT)
866 && ((nonzero_bits (op, GET_MODE (op))
867 & ((HOST_WIDE_INT) 1
868 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
869 == 0)))
870 return op;
872 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
873 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
874 return gen_rtx_NEG (mode, op);
876 break;
878 case FFS:
879 /* (ffs (*_extend <X>)) = (ffs <X>) */
880 if (GET_CODE (op) == SIGN_EXTEND
881 || GET_CODE (op) == ZERO_EXTEND)
882 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
883 GET_MODE (XEXP (op, 0)));
884 break;
886 case POPCOUNT:
887 switch (GET_CODE (op))
889 case BSWAP:
890 case ZERO_EXTEND:
891 /* (popcount (zero_extend <X>)) = (popcount <X>) */
892 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
893 GET_MODE (XEXP (op, 0)));
895 case ROTATE:
896 case ROTATERT:
897 /* Rotations don't affect popcount. */
898 if (!side_effects_p (XEXP (op, 1)))
899 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
900 GET_MODE (XEXP (op, 0)));
901 break;
903 default:
904 break;
906 break;
908 case PARITY:
909 switch (GET_CODE (op))
911 case NOT:
912 case BSWAP:
913 case ZERO_EXTEND:
914 case SIGN_EXTEND:
915 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
916 GET_MODE (XEXP (op, 0)));
918 case ROTATE:
919 case ROTATERT:
920 /* Rotations don't affect parity. */
921 if (!side_effects_p (XEXP (op, 1)))
922 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
923 GET_MODE (XEXP (op, 0)));
924 break;
926 default:
927 break;
929 break;
931 case BSWAP:
932 /* (bswap (bswap x)) -> x. */
933 if (GET_CODE (op) == BSWAP)
934 return XEXP (op, 0);
935 break;
937 case FLOAT:
938 /* (float (sign_extend <X>)) = (float <X>). */
939 if (GET_CODE (op) == SIGN_EXTEND)
940 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
941 GET_MODE (XEXP (op, 0)));
942 break;
944 case SIGN_EXTEND:
945 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
946 becomes just the MINUS if its mode is MODE. This allows
947 folding switch statements on machines using casesi (such as
948 the VAX). */
949 if (GET_CODE (op) == TRUNCATE
950 && GET_MODE (XEXP (op, 0)) == mode
951 && GET_CODE (XEXP (op, 0)) == MINUS
952 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
953 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
954 return XEXP (op, 0);
956 /* Check for a sign extension of a subreg of a promoted
957 variable, where the promotion is sign-extended, and the
958 target mode is the same as the variable's promotion. */
959 if (GET_CODE (op) == SUBREG
960 && SUBREG_PROMOTED_VAR_P (op)
961 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
962 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
963 return rtl_hooks.gen_lowpart_no_emit (mode, op);
965 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
966 if (! POINTERS_EXTEND_UNSIGNED
967 && mode == Pmode && GET_MODE (op) == ptr_mode
968 && (CONSTANT_P (op)
969 || (GET_CODE (op) == SUBREG
970 && REG_P (SUBREG_REG (op))
971 && REG_POINTER (SUBREG_REG (op))
972 && GET_MODE (SUBREG_REG (op)) == Pmode)))
973 return convert_memory_address (Pmode, op);
974 #endif
975 break;
977 case ZERO_EXTEND:
978 /* Check for a zero extension of a subreg of a promoted
979 variable, where the promotion is zero-extended, and the
980 target mode is the same as the variable's promotion. */
981 if (GET_CODE (op) == SUBREG
982 && SUBREG_PROMOTED_VAR_P (op)
983 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
984 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
985 return rtl_hooks.gen_lowpart_no_emit (mode, op);
987 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
988 if (POINTERS_EXTEND_UNSIGNED > 0
989 && mode == Pmode && GET_MODE (op) == ptr_mode
990 && (CONSTANT_P (op)
991 || (GET_CODE (op) == SUBREG
992 && REG_P (SUBREG_REG (op))
993 && REG_POINTER (SUBREG_REG (op))
994 && GET_MODE (SUBREG_REG (op)) == Pmode)))
995 return convert_memory_address (Pmode, op);
996 #endif
997 break;
999 default:
1000 break;
1003 return 0;
1006 /* Try to compute the value of a unary operation CODE whose output mode is to
1007 be MODE with input operand OP whose mode was originally OP_MODE.
1008 Return zero if the value cannot be computed. */
1010 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1011 rtx op, enum machine_mode op_mode)
1013 unsigned int width = GET_MODE_BITSIZE (mode);
1015 if (code == VEC_DUPLICATE)
1017 gcc_assert (VECTOR_MODE_P (mode));
1018 if (GET_MODE (op) != VOIDmode)
1020 if (!VECTOR_MODE_P (GET_MODE (op)))
1021 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1022 else
1023 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1024 (GET_MODE (op)));
1026 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1027 || GET_CODE (op) == CONST_VECTOR)
1029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1031 rtvec v = rtvec_alloc (n_elts);
1032 unsigned int i;
1034 if (GET_CODE (op) != CONST_VECTOR)
1035 for (i = 0; i < n_elts; i++)
1036 RTVEC_ELT (v, i) = op;
1037 else
1039 enum machine_mode inmode = GET_MODE (op);
1040 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1041 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1043 gcc_assert (in_n_elts < n_elts);
1044 gcc_assert ((n_elts % in_n_elts) == 0);
1045 for (i = 0; i < n_elts; i++)
1046 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1048 return gen_rtx_CONST_VECTOR (mode, v);
1052 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1054 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1055 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1056 enum machine_mode opmode = GET_MODE (op);
1057 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1058 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1059 rtvec v = rtvec_alloc (n_elts);
1060 unsigned int i;
1062 gcc_assert (op_n_elts == n_elts);
1063 for (i = 0; i < n_elts; i++)
1065 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1066 CONST_VECTOR_ELT (op, i),
1067 GET_MODE_INNER (opmode));
1068 if (!x)
1069 return 0;
1070 RTVEC_ELT (v, i) = x;
1072 return gen_rtx_CONST_VECTOR (mode, v);
1075 /* The order of these tests is critical so that, for example, we don't
1076 check the wrong mode (input vs. output) for a conversion operation,
1077 such as FIX. At some point, this should be simplified. */
1079 if (code == FLOAT && GET_MODE (op) == VOIDmode
1080 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1082 HOST_WIDE_INT hv, lv;
1083 REAL_VALUE_TYPE d;
1085 if (CONST_INT_P (op))
1086 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1087 else
1088 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1090 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1091 d = real_value_truncate (mode, d);
1092 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1094 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1095 && (GET_CODE (op) == CONST_DOUBLE
1096 || CONST_INT_P (op)))
1098 HOST_WIDE_INT hv, lv;
1099 REAL_VALUE_TYPE d;
1101 if (CONST_INT_P (op))
1102 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1103 else
1104 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1106 if (op_mode == VOIDmode)
1108 /* We don't know how to interpret negative-looking numbers in
1109 this case, so don't try to fold those. */
1110 if (hv < 0)
1111 return 0;
1113 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1114 ;
1115 else
1116 hv = 0, lv &= GET_MODE_MASK (op_mode);
1118 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1119 d = real_value_truncate (mode, d);
1120 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1123 if (CONST_INT_P (op)
1124 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1126 HOST_WIDE_INT arg0 = INTVAL (op);
1127 HOST_WIDE_INT val;
1129 switch (code)
1131 case NOT:
1132 val = ~ arg0;
1133 break;
1135 case NEG:
1136 val = - arg0;
1137 break;
1139 case ABS:
1140 val = (arg0 >= 0 ? arg0 : - arg0);
1141 break;
1143 case FFS:
1144 /* Don't use ffs here. Instead, get low order bit and then its
1145 number. If arg0 is zero, this will return 0, as desired. */
1146 arg0 &= GET_MODE_MASK (mode);
1147 val = exact_log2 (arg0 & (- arg0)) + 1;
1148 break;
1150 case CLZ:
1151 arg0 &= GET_MODE_MASK (mode);
1152 arg0 &= GET_MODE_MASK (mode);
1152 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1153 ;
1154 else
1155 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1156 break;
1158 case CTZ:
1159 arg0 &= GET_MODE_MASK (mode);
1160 if (arg0 == 0)
1162 /* Even if the value at zero is undefined, we have to come
1163 up with some replacement. Seems good enough. */
1164 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1165 val = GET_MODE_BITSIZE (mode);
1167 else
1168 val = exact_log2 (arg0 & -arg0);
1169 break;
1171 case POPCOUNT:
1172 arg0 &= GET_MODE_MASK (mode);
1173 val = 0;
1174 while (arg0)
1175 val++, arg0 &= arg0 - 1;
1176 break;
1178 case PARITY:
1179 arg0 &= GET_MODE_MASK (mode);
1180 val = 0;
1181 while (arg0)
1182 val++, arg0 &= arg0 - 1;
1183 val &= 1;
1184 break;
1186 case BSWAP:
1188 unsigned int s;
1190 val = 0;
1191 for (s = 0; s < width; s += 8)
1193 unsigned int d = width - s - 8;
1194 unsigned HOST_WIDE_INT byte;
1195 byte = (arg0 >> s) & 0xff;
1196 val |= byte << d;
1199 break;
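      /* Illustrative example (not part of the original file): for SImode,
         width is 32, so arg0 == 0x12345678 produces val == 0x78563412.  */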
1201 case TRUNCATE:
1202 val = arg0;
1203 break;
1205 case ZERO_EXTEND:
1206 /* When zero-extending a CONST_INT, we need to know its
1207 original mode. */
1208 gcc_assert (op_mode != VOIDmode);
1209 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1211 /* If we were really extending the mode,
1212 we would have to distinguish between zero-extension
1213 and sign-extension. */
1214 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1215 val = arg0;
1217 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1218 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1219 else
1220 return 0;
1221 break;
1223 case SIGN_EXTEND:
1224 if (op_mode == VOIDmode)
1225 op_mode = mode;
1226 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1228 /* If we were really extending the mode,
1229 we would have to distinguish between zero-extension
1230 and sign-extension. */
1231 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1232 val = arg0;
1234 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1236 val
1237 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1238 if (val
1239 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1240 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1242 else
1243 return 0;
1244 break;
1246 case SQRT:
1247 case FLOAT_EXTEND:
1248 case FLOAT_TRUNCATE:
1249 case SS_TRUNCATE:
1250 case US_TRUNCATE:
1251 case SS_NEG:
1252 case US_NEG:
1253 return 0;
1255 default:
1256 gcc_unreachable ();
1259 return gen_int_mode (val, mode);
1262 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1263 for a DImode operation on a CONST_INT. */
1264 else if (GET_MODE (op) == VOIDmode
1265 && width <= HOST_BITS_PER_WIDE_INT * 2
1266 && (GET_CODE (op) == CONST_DOUBLE
1267 || CONST_INT_P (op)))
1269 unsigned HOST_WIDE_INT l1, lv;
1270 HOST_WIDE_INT h1, hv;
1272 if (GET_CODE (op) == CONST_DOUBLE)
1273 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1274 else
1275 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1277 switch (code)
1279 case NOT:
1280 lv = ~ l1;
1281 hv = ~ h1;
1282 break;
1284 case NEG:
1285 neg_double (l1, h1, &lv, &hv);
1286 break;
1288 case ABS:
1289 if (h1 < 0)
1290 neg_double (l1, h1, &lv, &hv);
1291 else
1292 lv = l1, hv = h1;
1293 break;
1295 case FFS:
1296 hv = 0;
1297 if (l1 == 0)
1299 if (h1 == 0)
1300 lv = 0;
1301 else
1302 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1304 else
1305 lv = exact_log2 (l1 & -l1) + 1;
1306 break;
1308 case CLZ:
1309 hv = 0;
1310 if (h1 != 0)
1311 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1312 - HOST_BITS_PER_WIDE_INT;
1313 else if (l1 != 0)
1314 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1315 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1316 lv = GET_MODE_BITSIZE (mode);
1317 break;
1319 case CTZ:
1320 hv = 0;
1321 if (l1 != 0)
1322 lv = exact_log2 (l1 & -l1);
1323 else if (h1 != 0)
1324 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1325 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1326 lv = GET_MODE_BITSIZE (mode);
1327 break;
1329 case POPCOUNT:
1330 hv = 0;
1331 lv = 0;
1332 while (l1)
1333 lv++, l1 &= l1 - 1;
1334 while (h1)
1335 lv++, h1 &= h1 - 1;
1336 break;
1338 case PARITY:
1339 hv = 0;
1340 lv = 0;
1341 while (l1)
1342 lv++, l1 &= l1 - 1;
1343 while (h1)
1344 lv++, h1 &= h1 - 1;
1345 lv &= 1;
1346 break;
1348 case BSWAP:
1350 unsigned int s;
1352 hv = 0;
1353 lv = 0;
1354 for (s = 0; s < width; s += 8)
1356 unsigned int d = width - s - 8;
1357 unsigned HOST_WIDE_INT byte;
1359 if (s < HOST_BITS_PER_WIDE_INT)
1360 byte = (l1 >> s) & 0xff;
1361 else
1362 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1364 if (d < HOST_BITS_PER_WIDE_INT)
1365 lv |= byte << d;
1366 else
1367 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1370 break;
1372 case TRUNCATE:
1373 /* This is just a change-of-mode, so do nothing. */
1374 lv = l1, hv = h1;
1375 break;
1377 case ZERO_EXTEND:
1378 gcc_assert (op_mode != VOIDmode);
1380 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1381 return 0;
1383 hv = 0;
1384 lv = l1 & GET_MODE_MASK (op_mode);
1385 break;
1387 case SIGN_EXTEND:
1388 if (op_mode == VOIDmode
1389 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1390 return 0;
1391 else
1393 lv = l1 & GET_MODE_MASK (op_mode);
1394 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1395 && (lv & ((HOST_WIDE_INT) 1
1396 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1397 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1399 hv = HWI_SIGN_EXTEND (lv);
1401 break;
1403 case SQRT:
1404 return 0;
1406 default:
1407 return 0;
1410 return immed_double_const (lv, hv, mode);
1413 else if (GET_CODE (op) == CONST_DOUBLE
1414 && SCALAR_FLOAT_MODE_P (mode))
1416 REAL_VALUE_TYPE d, t;
1417 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1419 switch (code)
1421 case SQRT:
1422 if (HONOR_SNANS (mode) && real_isnan (&d))
1423 return 0;
1424 real_sqrt (&t, mode, &d);
1425 d = t;
1426 break;
1427 case ABS:
1428 d = REAL_VALUE_ABS (d);
1429 break;
1430 case NEG:
1431 d = REAL_VALUE_NEGATE (d);
1432 break;
1433 case FLOAT_TRUNCATE:
1434 d = real_value_truncate (mode, d);
1435 break;
1436 case FLOAT_EXTEND:
1437 /* All this does is change the mode. */
1438 break;
1439 case FIX:
1440 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1441 break;
1442 case NOT:
1444 long tmp[4];
1445 int i;
1447 real_to_target (tmp, &d, GET_MODE (op));
1448 for (i = 0; i < 4; i++)
1449 tmp[i] = ~tmp[i];
1450 real_from_target (&d, tmp, mode);
1451 break;
1453 default:
1454 gcc_unreachable ();
1456 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1459 else if (GET_CODE (op) == CONST_DOUBLE
1460 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1461 && GET_MODE_CLASS (mode) == MODE_INT
1462 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1464 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1465 operators are intentionally left unspecified (to ease implementation
1466 by target backends), for consistency, this routine implements the
1467 same semantics for constant folding as used by the middle-end. */
1469 /* This was formerly used only for non-IEEE float.
1470 eggert@twinsun.com says it is safe for IEEE also. */
1471 HOST_WIDE_INT xh, xl, th, tl;
1472 REAL_VALUE_TYPE x, t;
1473 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1474 switch (code)
1476 case FIX:
1477 if (REAL_VALUE_ISNAN (x))
1478 return const0_rtx;
1480 /* Test against the signed upper bound. */
1481 if (width > HOST_BITS_PER_WIDE_INT)
1483 th = ((unsigned HOST_WIDE_INT) 1
1484 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1485 tl = -1;
1487 else
1489 th = 0;
1490 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1492 real_from_integer (&t, VOIDmode, tl, th, 0);
1493 if (REAL_VALUES_LESS (t, x))
1495 xh = th;
1496 xl = tl;
1497 break;
1500 /* Test against the signed lower bound. */
1501 if (width > HOST_BITS_PER_WIDE_INT)
1503 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1504 tl = 0;
1506 else
1508 th = -1;
1509 tl = (HOST_WIDE_INT) -1 << (width - 1);
1511 real_from_integer (&t, VOIDmode, tl, th, 0);
1512 if (REAL_VALUES_LESS (x, t))
1514 xh = th;
1515 xl = tl;
1516 break;
1518 REAL_VALUE_TO_INT (&xl, &xh, x);
1519 break;
1521 case UNSIGNED_FIX:
1522 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1523 return const0_rtx;
1525 /* Test against the unsigned upper bound. */
1526 if (width == 2*HOST_BITS_PER_WIDE_INT)
1528 th = -1;
1529 tl = -1;
1531 else if (width >= HOST_BITS_PER_WIDE_INT)
1533 th = ((unsigned HOST_WIDE_INT) 1
1534 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1535 tl = -1;
1537 else
1539 th = 0;
1540 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1542 real_from_integer (&t, VOIDmode, tl, th, 1);
1543 if (REAL_VALUES_LESS (t, x))
1545 xh = th;
1546 xl = tl;
1547 break;
1550 REAL_VALUE_TO_INT (&xl, &xh, x);
1551 break;
1553 default:
1554 gcc_unreachable ();
1556 return immed_double_const (xl, xh, mode);
1559 return NULL_RTX;
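/* Illustrative example (not part of the original file): for 32-bit SImode,
   (fix:SI (const_double 1.0e30)) is clamped against the signed upper bound
   and folds to (const_int 2147483647), while a NaN operand folds to
   (const_int 0).  */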
1562 /* Subroutine of simplify_binary_operation to simplify a commutative,
1563 associative binary operation CODE with result mode MODE, operating
1564 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1565 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1566 canonicalization is possible. */
1568 static rtx
1569 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1570 rtx op0, rtx op1)
1572 rtx tem;
1574 /* Linearize the operator to the left. */
1575 if (GET_CODE (op1) == code)
1577 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1578 if (GET_CODE (op0) == code)
1580 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1581 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1584 /* "a op (b op c)" becomes "(b op c) op a". */
1585 if (! swap_commutative_operands_p (op1, op0))
1586 return simplify_gen_binary (code, mode, op1, op0);
1588 tem = op0;
1589 op0 = op1;
1590 op1 = tem;
1593 if (GET_CODE (op0) == code)
1595 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1596 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1598 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1599 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1602 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1603 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1604 if (tem != 0)
1605 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1607 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1608 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1609 if (tem != 0)
1610 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1613 return 0;
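/* Illustrative example (not part of the original file): given
   (and:SI (and:SI (reg:SI 1) (const_int 12)) (const_int 10)), the
   "(a op b) op c" -> "a op (b op c)" step above folds the two constants,
   and the result is (and:SI (reg:SI 1) (const_int 8)).  */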
1617 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1618 and OP1. Return 0 if no simplification is possible.
1620 Don't use this for relational operations such as EQ or LT.
1621 Use simplify_relational_operation instead. */
1623 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1624 rtx op0, rtx op1)
1626 rtx trueop0, trueop1;
1627 rtx tem;
1629 /* Relational operations don't work here. We must know the mode
1630 of the operands in order to do the comparison correctly.
1631 Assuming a full word can give incorrect results.
1632 Consider comparing 128 with -128 in QImode. */
1633 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1634 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1636 /* Make sure the constant is second. */
1637 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1638 && swap_commutative_operands_p (op0, op1))
1640 tem = op0, op0 = op1, op1 = tem;
1643 trueop0 = avoid_constant_pool_reference (op0);
1644 trueop1 = avoid_constant_pool_reference (op1);
1646 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1647 if (tem)
1648 return tem;
1649 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1652 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1653 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1654 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1655 actual constants. */
1657 static rtx
1658 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1659 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1661 rtx tem, reversed, opleft, opright;
1662 HOST_WIDE_INT val;
1663 unsigned int width = GET_MODE_BITSIZE (mode);
1665 /* Even if we can't compute a constant result,
1666 there are some cases worth simplifying. */
1668 switch (code)
1670 case PLUS:
1671 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1672 when x is NaN, infinite, or finite and nonzero. They aren't
1673 when x is -0 and the rounding mode is not towards -infinity,
1674 since (-0) + 0 is then 0. */
1675 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1676 return op0;
1678 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1679 transformations are safe even for IEEE. */
1680 if (GET_CODE (op0) == NEG)
1681 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1682 else if (GET_CODE (op1) == NEG)
1683 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1685 /* (~a) + 1 -> -a */
1686 if (INTEGRAL_MODE_P (mode)
1687 && GET_CODE (op0) == NOT
1688 && trueop1 == const1_rtx)
1689 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1691 /* Handle both-operands-constant cases. We can only add
1692 CONST_INTs to constants since the sum of relocatable symbols
1693 can't be handled by most assemblers. Don't add CONST_INT
1694 to CONST_INT since overflow won't be computed properly if wider
1695 than HOST_BITS_PER_WIDE_INT. */
1697 if ((GET_CODE (op0) == CONST
1698 || GET_CODE (op0) == SYMBOL_REF
1699 || GET_CODE (op0) == LABEL_REF)
1700 && CONST_INT_P (op1))
1701 return plus_constant (op0, INTVAL (op1));
1702 else if ((GET_CODE (op1) == CONST
1703 || GET_CODE (op1) == SYMBOL_REF
1704 || GET_CODE (op1) == LABEL_REF)
1705 && CONST_INT_P (op0))
1706 return plus_constant (op1, INTVAL (op0));
1708 /* See if this is something like X * C - X or vice versa or
1709 if the multiplication is written as a shift. If so, we can
1710 distribute and make a new multiply, shift, or maybe just
1711 have X (if C is 2 in the example above). But don't make
1712 something more expensive than we had before. */
1714 if (SCALAR_INT_MODE_P (mode))
1716 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1717 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1718 rtx lhs = op0, rhs = op1;
1720 if (GET_CODE (lhs) == NEG)
1722 coeff0l = -1;
1723 coeff0h = -1;
1724 lhs = XEXP (lhs, 0);
1726 else if (GET_CODE (lhs) == MULT
1727 && CONST_INT_P (XEXP (lhs, 1)))
1729 coeff0l = INTVAL (XEXP (lhs, 1));
1730 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1731 lhs = XEXP (lhs, 0);
1733 else if (GET_CODE (lhs) == ASHIFT
1734 && CONST_INT_P (XEXP (lhs, 1))
1735 && INTVAL (XEXP (lhs, 1)) >= 0
1736 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1738 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1739 coeff0h = 0;
1740 lhs = XEXP (lhs, 0);
1743 if (GET_CODE (rhs) == NEG)
1745 coeff1l = -1;
1746 coeff1h = -1;
1747 rhs = XEXP (rhs, 0);
1749 else if (GET_CODE (rhs) == MULT
1750 && CONST_INT_P (XEXP (rhs, 1)))
1752 coeff1l = INTVAL (XEXP (rhs, 1));
1753 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1754 rhs = XEXP (rhs, 0);
1756 else if (GET_CODE (rhs) == ASHIFT
1757 && CONST_INT_P (XEXP (rhs, 1))
1758 && INTVAL (XEXP (rhs, 1)) >= 0
1759 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1761 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1762 coeff1h = 0;
1763 rhs = XEXP (rhs, 0);
1766 if (rtx_equal_p (lhs, rhs))
1768 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1769 rtx coeff;
1770 unsigned HOST_WIDE_INT l;
1771 HOST_WIDE_INT h;
1772 bool speed = optimize_function_for_speed_p (cfun);
1774 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1775 coeff = immed_double_const (l, h, mode);
1777 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1778 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1779 ? tem : 0;
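      /* Illustrative example (not part of the original file): with
         op0 == (mult:SI (reg:SI 1) (const_int 4)) and op1 == (reg:SI 1),
         the coefficients 4 and 1 are added and the sum simplifies to
         (mult:SI (reg:SI 1) (const_int 5)), which is returned only if
         rtx_cost does not consider it more expensive than the original
         PLUS.  */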
1783 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1784 if ((CONST_INT_P (op1)
1785 || GET_CODE (op1) == CONST_DOUBLE)
1786 && GET_CODE (op0) == XOR
1787 && (CONST_INT_P (XEXP (op0, 1))
1788 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1789 && mode_signbit_p (mode, op1))
1790 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1791 simplify_gen_binary (XOR, mode, op1,
1792 XEXP (op0, 1)));
1794 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1795 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1796 && GET_CODE (op0) == MULT
1797 && GET_CODE (XEXP (op0, 0)) == NEG)
1799 rtx in1, in2;
1801 in1 = XEXP (XEXP (op0, 0), 0);
1802 in2 = XEXP (op0, 1);
1803 return simplify_gen_binary (MINUS, mode, op1,
1804 simplify_gen_binary (MULT, mode,
1805 in1, in2));
1808 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1809 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1810 is 1. */
1811 if (COMPARISON_P (op0)
1812 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1813 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1814 && (reversed = reversed_comparison (op0, mode)))
1815 return
1816 simplify_gen_unary (NEG, mode, reversed, mode);
1818 /* If one of the operands is a PLUS or a MINUS, see if we can
1819 simplify this by the associative law.
1820 Don't use the associative law for floating point.
1821 The inaccuracy makes it nonassociative,
1822 and subtle programs can break if operations are associated. */
1824 if (INTEGRAL_MODE_P (mode)
1825 && (plus_minus_operand_p (op0)
1826 || plus_minus_operand_p (op1))
1827 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1828 return tem;
1830 /* Reassociate floating point addition only when the user
1831 specifies associative math operations. */
1832 if (FLOAT_MODE_P (mode)
1833 && flag_associative_math)
1835 tem = simplify_associative_operation (code, mode, op0, op1);
1836 if (tem)
1837 return tem;
1839 break;
1841 case COMPARE:
1842 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1843 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1844 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1845 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1847 rtx xop00 = XEXP (op0, 0);
1848 rtx xop10 = XEXP (op1, 0);
1850 #ifdef HAVE_cc0
1851 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1852 #else
1853 if (REG_P (xop00) && REG_P (xop10)
1854 && GET_MODE (xop00) == GET_MODE (xop10)
1855 && REGNO (xop00) == REGNO (xop10)
1856 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1857 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1858 #endif
1859 return xop00;
1861 break;
1863 case MINUS:
1864 /* We can't assume x-x is 0 even with non-IEEE floating point,
1865 but since it is zero except in very strange circumstances, we
1866 will treat it as zero with -ffinite-math-only. */
1867 if (rtx_equal_p (trueop0, trueop1)
1868 && ! side_effects_p (op0)
1869 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1870 return CONST0_RTX (mode);
1872 /* Change subtraction from zero into negation. (0 - x) is the
1873 same as -x when x is NaN, infinite, or finite and nonzero.
1874 But if the mode has signed zeros, and does not round towards
1875 -infinity, then 0 - 0 is 0, not -0. */
1876 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1877 return simplify_gen_unary (NEG, mode, op1, mode);
1879 /* (-1 - a) is ~a. */
1880 if (trueop0 == constm1_rtx)
1881 return simplify_gen_unary (NOT, mode, op1, mode);
1883 /* Subtracting 0 has no effect unless the mode has signed zeros
1884 and supports rounding towards -infinity. In such a case,
1885 0 - 0 is -0. */
1886 if (!(HONOR_SIGNED_ZEROS (mode)
1887 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1888 && trueop1 == CONST0_RTX (mode))
1889 return op0;
1891 /* See if this is something like X * C - X or vice versa or
1892 if the multiplication is written as a shift. If so, we can
1893 distribute and make a new multiply, shift, or maybe just
1894 have X (if C is 2 in the example above). But don't make
1895 something more expensive than we had before. */
1897 if (SCALAR_INT_MODE_P (mode))
1899 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1900 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1901 rtx lhs = op0, rhs = op1;
1903 if (GET_CODE (lhs) == NEG)
1905 coeff0l = -1;
1906 coeff0h = -1;
1907 lhs = XEXP (lhs, 0);
1909 else if (GET_CODE (lhs) == MULT
1910 && CONST_INT_P (XEXP (lhs, 1)))
1912 coeff0l = INTVAL (XEXP (lhs, 1));
1913 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1914 lhs = XEXP (lhs, 0);
1916 else if (GET_CODE (lhs) == ASHIFT
1917 && CONST_INT_P (XEXP (lhs, 1))
1918 && INTVAL (XEXP (lhs, 1)) >= 0
1919 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1921 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1922 coeff0h = 0;
1923 lhs = XEXP (lhs, 0);
1926 if (GET_CODE (rhs) == NEG)
1928 negcoeff1l = 1;
1929 negcoeff1h = 0;
1930 rhs = XEXP (rhs, 0);
1932 else if (GET_CODE (rhs) == MULT
1933 && CONST_INT_P (XEXP (rhs, 1)))
1935 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1936 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1937 rhs = XEXP (rhs, 0);
1939 else if (GET_CODE (rhs) == ASHIFT
1940 && CONST_INT_P (XEXP (rhs, 1))
1941 && INTVAL (XEXP (rhs, 1)) >= 0
1942 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1944 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1945 negcoeff1h = -1;
1946 rhs = XEXP (rhs, 0);
1949 if (rtx_equal_p (lhs, rhs))
1951 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1952 rtx coeff;
1953 unsigned HOST_WIDE_INT l;
1954 HOST_WIDE_INT h;
1955 bool speed = optimize_function_for_speed_p (cfun);
1957 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1958 coeff = immed_double_const (l, h, mode);
1960 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1961 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1962 ? tem : 0;
1966 /* (a - (-b)) -> (a + b). True even for IEEE. */
1967 if (GET_CODE (op1) == NEG)
1968 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1970 /* (-x - c) may be simplified as (-c - x). */
1971 if (GET_CODE (op0) == NEG
1972 && (CONST_INT_P (op1)
1973 || GET_CODE (op1) == CONST_DOUBLE))
1975 tem = simplify_unary_operation (NEG, mode, op1, mode);
1976 if (tem)
1977 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1980 /* Don't let a relocatable value get a negative coeff. */
1981 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
1982 return simplify_gen_binary (PLUS, mode,
1983 op0,
1984 neg_const_int (mode, op1));
1986 /* (x - (x & y)) -> (x & ~y) */
1987 if (GET_CODE (op1) == AND)
1989 if (rtx_equal_p (op0, XEXP (op1, 0)))
1991 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1992 GET_MODE (XEXP (op1, 1)));
1993 return simplify_gen_binary (AND, mode, op0, tem);
1995 if (rtx_equal_p (op0, XEXP (op1, 1)))
1997 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1998 GET_MODE (XEXP (op1, 0)));
1999 return simplify_gen_binary (AND, mode, op0, tem);
2003 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2004 by reversing the comparison code if valid. */
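/* For example, (minus (const_int 1) (eq X Y)) becomes (ne X Y),
   since comparisons yield only 0 or 1 when STORE_FLAG_VALUE is 1.  */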
2005 if (STORE_FLAG_VALUE == 1
2006 && trueop0 == const1_rtx
2007 && COMPARISON_P (op1)
2008 && (reversed = reversed_comparison (op1, mode)))
2009 return reversed;
2011 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2012 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2013 && GET_CODE (op1) == MULT
2014 && GET_CODE (XEXP (op1, 0)) == NEG)
2016 rtx in1, in2;
2018 in1 = XEXP (XEXP (op1, 0), 0);
2019 in2 = XEXP (op1, 1);
2020 return simplify_gen_binary (PLUS, mode,
2021 simplify_gen_binary (MULT, mode,
2022 in1, in2),
2023 op0);
2026 /* Canonicalize (minus (neg A) (mult B C)) to
2027 (minus (mult (neg B) C) A). */
2028 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2029 && GET_CODE (op1) == MULT
2030 && GET_CODE (op0) == NEG)
2032 rtx in1, in2;
2034 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2035 in2 = XEXP (op1, 1);
2036 return simplify_gen_binary (MINUS, mode,
2037 simplify_gen_binary (MULT, mode,
2038 in1, in2),
2039 XEXP (op0, 0));
2042 /* If one of the operands is a PLUS or a MINUS, see if we can
2043 simplify this by the associative law. This will, for example,
2044 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2045 Don't use the associative law for floating point.
2046 The inaccuracy makes it nonassociative,
2047 and subtle programs can break if operations are associated. */
2049 if (INTEGRAL_MODE_P (mode)
2050 && (plus_minus_operand_p (op0)
2051 || plus_minus_operand_p (op1))
2052 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2053 return tem;
2054 break;
2056 case MULT:
2057 if (trueop1 == constm1_rtx)
2058 return simplify_gen_unary (NEG, mode, op0, mode);
2060 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2061 x is NaN, since x * 0 is then also NaN. Nor is it valid
2062 when the mode has signed zeros, since multiplying a negative
2063 number by 0 will give -0, not 0. */
2064 if (!HONOR_NANS (mode)
2065 && !HONOR_SIGNED_ZEROS (mode)
2066 && trueop1 == CONST0_RTX (mode)
2067 && ! side_effects_p (op0))
2068 return op1;
2070 /* In IEEE floating point, x*1 is not equivalent to x for
2071 signalling NaNs. */
2072 if (!HONOR_SNANS (mode)
2073 && trueop1 == CONST1_RTX (mode))
2074 return op0;
2076 /* Convert multiply by constant power of two into shift unless
2077 we are still generating RTL. This test is a kludge. */
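/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */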
2078 if (CONST_INT_P (trueop1)
2079 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2080 /* If the mode is larger than the host word size, and the
2081 uppermost bit is set, then this isn't a power of two due
2082 to implicit sign extension. */
2083 && (width <= HOST_BITS_PER_WIDE_INT
2084 || val != HOST_BITS_PER_WIDE_INT - 1))
2085 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2087 /* Likewise for multipliers wider than a word. */
2088 if (GET_CODE (trueop1) == CONST_DOUBLE
2089 && (GET_MODE (trueop1) == VOIDmode
2090 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2091 && GET_MODE (op0) == mode
2092 && CONST_DOUBLE_LOW (trueop1) == 0
2093 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2094 return simplify_gen_binary (ASHIFT, mode, op0,
2095 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2097 /* x*2 is x+x and x*(-1) is -x */
2098 if (GET_CODE (trueop1) == CONST_DOUBLE
2099 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2100 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2101 && GET_MODE (op0) == mode)
2103 REAL_VALUE_TYPE d;
2104 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2106 if (REAL_VALUES_EQUAL (d, dconst2))
2107 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2109 if (!HONOR_SNANS (mode)
2110 && REAL_VALUES_EQUAL (d, dconstm1))
2111 return simplify_gen_unary (NEG, mode, op0, mode);
2114 /* Optimize -x * -x as x * x. */
2115 if (FLOAT_MODE_P (mode)
2116 && GET_CODE (op0) == NEG
2117 && GET_CODE (op1) == NEG
2118 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2119 && !side_effects_p (XEXP (op0, 0)))
2120 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2122 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2123 if (SCALAR_FLOAT_MODE_P (mode)
2124 && GET_CODE (op0) == ABS
2125 && GET_CODE (op1) == ABS
2126 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2127 && !side_effects_p (XEXP (op0, 0)))
2128 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2130 /* Reassociate multiplication, but for floating point MULTs
2131 only when the user specifies unsafe math optimizations. */
2132 if (! FLOAT_MODE_P (mode)
2133 || flag_unsafe_math_optimizations)
2135 tem = simplify_associative_operation (code, mode, op0, op1);
2136 if (tem)
2137 return tem;
2139 break;
2141 case IOR:
2142 if (trueop1 == const0_rtx)
2143 return op0;
2144 if (CONST_INT_P (trueop1)
2145 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2146 == GET_MODE_MASK (mode)))
2147 return op1;
2148 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2149 return op0;
2150 /* A | (~A) -> -1 */
2151 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2152 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2153 && ! side_effects_p (op0)
2154 && SCALAR_INT_MODE_P (mode))
2155 return constm1_rtx;
2157 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2158 if (CONST_INT_P (op1)
2159 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2160 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2161 return op1;
2163 /* Canonicalize (X & C1) | C2. */
2164 if (GET_CODE (op0) == AND
2165 && CONST_INT_P (trueop1)
2166 && CONST_INT_P (XEXP (op0, 1)))
2168 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2169 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2170 HOST_WIDE_INT c2 = INTVAL (trueop1);
2173 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2173 if ((c1 & c2) == c1
2174 && !side_effects_p (XEXP (op0, 0)))
2175 return trueop1;
2177 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2178 if (((c1|c2) & mask) == mask)
2179 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2181 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
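/* For example, (ior (and X (const_int 255)) (const_int 15)) becomes
   (ior (and X (const_int 240)) (const_int 15)).  */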
2182 if (((c1 & ~c2) & mask) != (c1 & mask))
2184 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2185 gen_int_mode (c1 & ~c2, mode));
2186 return simplify_gen_binary (IOR, mode, tem, op1);
2190 /* Convert (A & B) | A to A. */
2191 if (GET_CODE (op0) == AND
2192 && (rtx_equal_p (XEXP (op0, 0), op1)
2193 || rtx_equal_p (XEXP (op0, 1), op1))
2194 && ! side_effects_p (XEXP (op0, 0))
2195 && ! side_effects_p (XEXP (op0, 1)))
2196 return op1;
2198 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2199 mode size to (rotate A CX). */
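/* For example, assuming 32-bit SImode, (ior (ashift A (const_int 24))
   (lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)).  */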
2201 if (GET_CODE (op1) == ASHIFT
2202 || GET_CODE (op1) == SUBREG)
2204 opleft = op1;
2205 opright = op0;
2207 else
2209 opright = op1;
2210 opleft = op0;
2213 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2214 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2215 && CONST_INT_P (XEXP (opleft, 1))
2216 && CONST_INT_P (XEXP (opright, 1))
2217 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2218 == GET_MODE_BITSIZE (mode)))
2219 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2221 /* Same, but for ashift that has been "simplified" to a wider mode
2222 by simplify_shift_const. */
2224 if (GET_CODE (opleft) == SUBREG
2225 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2226 && GET_CODE (opright) == LSHIFTRT
2227 && GET_CODE (XEXP (opright, 0)) == SUBREG
2228 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2229 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2230 && (GET_MODE_SIZE (GET_MODE (opleft))
2231 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2232 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2233 SUBREG_REG (XEXP (opright, 0)))
2234 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2235 && CONST_INT_P (XEXP (opright, 1))
2236 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2237 == GET_MODE_BITSIZE (mode)))
2238 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2239 XEXP (SUBREG_REG (opleft), 1));
2241 /* If we have (ior (and X C1) C2), simplify this by making
2242 C1 as small as possible if C1 actually changes. */
2243 if (CONST_INT_P (op1)
2244 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2245 || INTVAL (op1) > 0)
2246 && GET_CODE (op0) == AND
2247 && CONST_INT_P (XEXP (op0, 1))
2248 && CONST_INT_P (op1)
2249 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2250 return simplify_gen_binary (IOR, mode,
2251 simplify_gen_binary
2252 (AND, mode, XEXP (op0, 0),
2253 GEN_INT (INTVAL (XEXP (op0, 1))
2254 & ~INTVAL (op1))),
2255 op1);
2257 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2258 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2259 the PLUS does not affect any of the bits in OP1: then we can do
2260 the IOR as a PLUS and we can associate. This is valid if OP1
2261 can be safely shifted left C bits. */
2262 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2263 && GET_CODE (XEXP (op0, 0)) == PLUS
2264 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2265 && CONST_INT_P (XEXP (op0, 1))
2266 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2268 int count = INTVAL (XEXP (op0, 1));
2269 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2271 if (mask >> count == INTVAL (trueop1)
2272 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2273 return simplify_gen_binary (ASHIFTRT, mode,
2274 plus_constant (XEXP (op0, 0), mask),
2275 XEXP (op0, 1));
2278 tem = simplify_associative_operation (code, mode, op0, op1);
2279 if (tem)
2280 return tem;
2281 break;
2283 case XOR:
2284 if (trueop1 == const0_rtx)
2285 return op0;
2286 if (CONST_INT_P (trueop1)
2287 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2288 == GET_MODE_MASK (mode)))
2289 return simplify_gen_unary (NOT, mode, op0, mode);
2290 if (rtx_equal_p (trueop0, trueop1)
2291 && ! side_effects_p (op0)
2292 && GET_MODE_CLASS (mode) != MODE_CC)
2293 return CONST0_RTX (mode);
2295 /* Canonicalize XOR of the most significant bit to PLUS. */
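/* For example, in QImode (xor X (const_int -128)) becomes
   (plus X (const_int -128)): flipping the sign bit and adding the
   sign bit agree modulo 256.  */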
2296 if ((CONST_INT_P (op1)
2297 || GET_CODE (op1) == CONST_DOUBLE)
2298 && mode_signbit_p (mode, op1))
2299 return simplify_gen_binary (PLUS, mode, op0, op1);
2300 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2301 if ((CONST_INT_P (op1)
2302 || GET_CODE (op1) == CONST_DOUBLE)
2303 && GET_CODE (op0) == PLUS
2304 && (CONST_INT_P (XEXP (op0, 1))
2305 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2306 && mode_signbit_p (mode, XEXP (op0, 1)))
2307 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2308 simplify_gen_binary (XOR, mode, op1,
2309 XEXP (op0, 1)));
2311 /* If we are XORing two things that have no bits in common,
2312 convert them into an IOR. This helps to detect rotation encoded
2313 using those methods and possibly other simplifications. */
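/* For example, (xor (and X (const_int 15)) (and Y (const_int 240)))
   has no nonzero bits in common and can become the corresponding IOR.  */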
2315 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2316 && (nonzero_bits (op0, mode)
2317 & nonzero_bits (op1, mode)) == 0)
2318 return (simplify_gen_binary (IOR, mode, op0, op1));
2320 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2321 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2322 (NOT y). */
2324 int num_negated = 0;
2326 if (GET_CODE (op0) == NOT)
2327 num_negated++, op0 = XEXP (op0, 0);
2328 if (GET_CODE (op1) == NOT)
2329 num_negated++, op1 = XEXP (op1, 0);
2331 if (num_negated == 2)
2332 return simplify_gen_binary (XOR, mode, op0, op1);
2333 else if (num_negated == 1)
2334 return simplify_gen_unary (NOT, mode,
2335 simplify_gen_binary (XOR, mode, op0, op1),
2336 mode);
2339 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2340 correspond to a machine insn or result in further simplifications
2341 if B is a constant. */
2343 if (GET_CODE (op0) == AND
2344 && rtx_equal_p (XEXP (op0, 1), op1)
2345 && ! side_effects_p (op1))
2346 return simplify_gen_binary (AND, mode,
2347 simplify_gen_unary (NOT, mode,
2348 XEXP (op0, 0), mode),
2349 op1);
2351 else if (GET_CODE (op0) == AND
2352 && rtx_equal_p (XEXP (op0, 0), op1)
2353 && ! side_effects_p (op1))
2354 return simplify_gen_binary (AND, mode,
2355 simplify_gen_unary (NOT, mode,
2356 XEXP (op0, 1), mode),
2357 op1);
2359 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2360 comparison if STORE_FLAG_VALUE is 1. */
2361 if (STORE_FLAG_VALUE == 1
2362 && trueop1 == const1_rtx
2363 && COMPARISON_P (op0)
2364 && (reversed = reversed_comparison (op0, mode)))
2365 return reversed;
2367 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2368 is (lt foo (const_int 0)), so we can perform the above
2369 simplification if STORE_FLAG_VALUE is 1. */
2371 if (STORE_FLAG_VALUE == 1
2372 && trueop1 == const1_rtx
2373 && GET_CODE (op0) == LSHIFTRT
2374 && CONST_INT_P (XEXP (op0, 1))
2375 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2376 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2378 /* (xor (comparison foo bar) (const_int sign-bit))
2379 when STORE_FLAG_VALUE is the sign bit. */
2380 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2381 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2382 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2383 && trueop1 == const_true_rtx
2384 && COMPARISON_P (op0)
2385 && (reversed = reversed_comparison (op0, mode)))
2386 return reversed;
2388 tem = simplify_associative_operation (code, mode, op0, op1);
2389 if (tem)
2390 return tem;
2391 break;
2393 case AND:
2394 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2395 return trueop1;
2396 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2398 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2399 HOST_WIDE_INT nzop1;
2400 if (CONST_INT_P (trueop1))
2402 HOST_WIDE_INT val1 = INTVAL (trueop1);
2403 /* If we are turning off bits already known off in OP0, we need
2404 not do an AND. */
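/* For example, if OP0 is (zero_extend:SI (reg:QI R)), ANDing with
   (const_int 255) changes nothing and OP0 is returned as-is.  */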
2405 if ((nzop0 & ~val1) == 0)
2406 return op0;
2408 nzop1 = nonzero_bits (trueop1, mode);
2409 /* If we are clearing all the nonzero bits, the result is zero. */
2410 if ((nzop1 & nzop0) == 0
2411 && !side_effects_p (op0) && !side_effects_p (op1))
2412 return CONST0_RTX (mode);
2414 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2415 && GET_MODE_CLASS (mode) != MODE_CC)
2416 return op0;
2417 /* A & (~A) -> 0 */
2418 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2419 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2420 && ! side_effects_p (op0)
2421 && GET_MODE_CLASS (mode) != MODE_CC)
2422 return CONST0_RTX (mode);
2424 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2425 there are no nonzero bits of C outside of X's mode. */
2426 if ((GET_CODE (op0) == SIGN_EXTEND
2427 || GET_CODE (op0) == ZERO_EXTEND)
2428 && CONST_INT_P (trueop1)
2429 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2430 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2431 & INTVAL (trueop1)) == 0)
2433 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2434 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2435 gen_int_mode (INTVAL (trueop1),
2436 imode));
2437 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2440 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2441 we might be able to further simplify the AND with X and potentially
2442 remove the truncation altogether. */
2443 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2445 rtx x = XEXP (op0, 0);
2446 enum machine_mode xmode = GET_MODE (x);
2447 tem = simplify_gen_binary (AND, xmode, x,
2448 gen_int_mode (INTVAL (trueop1), xmode));
2449 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2452 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2453 if (GET_CODE (op0) == IOR
2454 && CONST_INT_P (trueop1)
2455 && CONST_INT_P (XEXP (op0, 1)))
2457 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2458 return simplify_gen_binary (IOR, mode,
2459 simplify_gen_binary (AND, mode,
2460 XEXP (op0, 0), op1),
2461 gen_int_mode (tmp, mode));
2464 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2465 insn (and may simplify more). */
2466 if (GET_CODE (op0) == XOR
2467 && rtx_equal_p (XEXP (op0, 0), op1)
2468 && ! side_effects_p (op1))
2469 return simplify_gen_binary (AND, mode,
2470 simplify_gen_unary (NOT, mode,
2471 XEXP (op0, 1), mode),
2472 op1);
2474 if (GET_CODE (op0) == XOR
2475 && rtx_equal_p (XEXP (op0, 1), op1)
2476 && ! side_effects_p (op1))
2477 return simplify_gen_binary (AND, mode,
2478 simplify_gen_unary (NOT, mode,
2479 XEXP (op0, 0), mode),
2480 op1);
2482 /* Similarly for (~(A ^ B)) & A. */
2483 if (GET_CODE (op0) == NOT
2484 && GET_CODE (XEXP (op0, 0)) == XOR
2485 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2486 && ! side_effects_p (op1))
2487 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2489 if (GET_CODE (op0) == NOT
2490 && GET_CODE (XEXP (op0, 0)) == XOR
2491 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2492 && ! side_effects_p (op1))
2493 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2495 /* Convert (A | B) & A to A. */
2496 if (GET_CODE (op0) == IOR
2497 && (rtx_equal_p (XEXP (op0, 0), op1)
2498 || rtx_equal_p (XEXP (op0, 1), op1))
2499 && ! side_effects_p (XEXP (op0, 0))
2500 && ! side_effects_p (XEXP (op0, 1)))
2501 return op1;
2503 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2504 ((A & N) + B) & M -> (A + B) & M
2505 Similarly if (N & M) == 0,
2506 ((A | N) + B) & M -> (A + B) & M
2507 and for - instead of + and/or ^ instead of |.
2508 Also, if (N & M) == 0, then
2509 (A +- N) & M -> A & M. */
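/* For example, with M == 255: ((A & 511) + B) & 255 becomes
   (A + B) & 255, and (A + 256) & 255 becomes A & 255.  */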
2510 if (CONST_INT_P (trueop1)
2511 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2512 && ~INTVAL (trueop1)
2513 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2514 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2516 rtx pmop[2];
2517 int which;
2519 pmop[0] = XEXP (op0, 0);
2520 pmop[1] = XEXP (op0, 1);
2522 if (CONST_INT_P (pmop[1])
2523 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2524 return simplify_gen_binary (AND, mode, pmop[0], op1);
2526 for (which = 0; which < 2; which++)
2528 tem = pmop[which];
2529 switch (GET_CODE (tem))
2531 case AND:
2532 if (CONST_INT_P (XEXP (tem, 1))
2533 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2534 == INTVAL (trueop1))
2535 pmop[which] = XEXP (tem, 0);
2536 break;
2537 case IOR:
2538 case XOR:
2539 if (CONST_INT_P (XEXP (tem, 1))
2540 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2541 pmop[which] = XEXP (tem, 0);
2542 break;
2543 default:
2544 break;
2548 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2550 tem = simplify_gen_binary (GET_CODE (op0), mode,
2551 pmop[0], pmop[1]);
2552 return simplify_gen_binary (code, mode, tem, op1);
2556 /* (and X (ior (not X) Y)) -> (and X Y) */
2557 if (GET_CODE (op1) == IOR
2558 && GET_CODE (XEXP (op1, 0)) == NOT
2559 && op0 == XEXP (XEXP (op1, 0), 0))
2560 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2562 /* (and (ior (not X) Y) X) -> (and X Y) */
2563 if (GET_CODE (op0) == IOR
2564 && GET_CODE (XEXP (op0, 0)) == NOT
2565 && op1 == XEXP (XEXP (op0, 0), 0))
2566 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2568 tem = simplify_associative_operation (code, mode, op0, op1);
2569 if (tem)
2570 return tem;
2571 break;
2573 case UDIV:
2574 /* 0/x is 0 (or x&0 if x has side-effects). */
2575 if (trueop0 == CONST0_RTX (mode))
2577 if (side_effects_p (op1))
2578 return simplify_gen_binary (AND, mode, op1, trueop0);
2579 return trueop0;
2581 /* x/1 is x. */
2582 if (trueop1 == CONST1_RTX (mode))
2583 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2584 /* Convert divide by power of two into shift. */
2585 if (CONST_INT_P (trueop1)
2586 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2587 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2588 break;
2590 case DIV:
2591 /* Handle floating point and integers separately. */
2592 if (SCALAR_FLOAT_MODE_P (mode))
2594 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2595 safe for modes with NaNs, since 0.0 / 0.0 will then be
2596 NaN rather than 0.0. Nor is it safe for modes with signed
2597 zeros, since dividing 0 by a negative number gives -0.0. */
2598 if (trueop0 == CONST0_RTX (mode)
2599 && !HONOR_NANS (mode)
2600 && !HONOR_SIGNED_ZEROS (mode)
2601 && ! side_effects_p (op1))
2602 return op0;
2603 /* x/1.0 is x. */
2604 if (trueop1 == CONST1_RTX (mode)
2605 && !HONOR_SNANS (mode))
2606 return op0;
2608 if (GET_CODE (trueop1) == CONST_DOUBLE
2609 && trueop1 != CONST0_RTX (mode))
2611 REAL_VALUE_TYPE d;
2612 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2614 /* x/-1.0 is -x. */
2615 if (REAL_VALUES_EQUAL (d, dconstm1)
2616 && !HONOR_SNANS (mode))
2617 return simplify_gen_unary (NEG, mode, op0, mode);
2619 /* Change FP division by a constant into multiplication.
2620 Only do this with -freciprocal-math. */
2621 if (flag_reciprocal_math
2622 && !REAL_VALUES_EQUAL (d, dconst0))
2624 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2625 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2626 return simplify_gen_binary (MULT, mode, op0, tem);
2630 else
2632 /* 0/x is 0 (or x&0 if x has side-effects). */
2633 if (trueop0 == CONST0_RTX (mode))
2635 if (side_effects_p (op1))
2636 return simplify_gen_binary (AND, mode, op1, trueop0);
2637 return trueop0;
2639 /* x/1 is x. */
2640 if (trueop1 == CONST1_RTX (mode))
2641 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2642 /* x/-1 is -x. */
2643 if (trueop1 == constm1_rtx)
2645 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2646 return simplify_gen_unary (NEG, mode, x, mode);
2649 break;
2651 case UMOD:
2652 /* 0%x is 0 (or x&0 if x has side-effects). */
2653 if (trueop0 == CONST0_RTX (mode))
2655 if (side_effects_p (op1))
2656 return simplify_gen_binary (AND, mode, op1, trueop0);
2657 return trueop0;
2659 /* x%1 is 0 (or x&0 if x has side-effects). */
2660 if (trueop1 == CONST1_RTX (mode))
2662 if (side_effects_p (op0))
2663 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2664 return CONST0_RTX (mode);
2666 /* Implement modulus by power of two as AND. */
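/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */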
2667 if (CONST_INT_P (trueop1)
2668 && exact_log2 (INTVAL (trueop1)) > 0)
2669 return simplify_gen_binary (AND, mode, op0,
2670 GEN_INT (INTVAL (op1) - 1));
2671 break;
2673 case MOD:
2674 /* 0%x is 0 (or x&0 if x has side-effects). */
2675 if (trueop0 == CONST0_RTX (mode))
2677 if (side_effects_p (op1))
2678 return simplify_gen_binary (AND, mode, op1, trueop0);
2679 return trueop0;
2681 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2682 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2684 if (side_effects_p (op0))
2685 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2686 return CONST0_RTX (mode);
2688 break;
2690 case ROTATERT:
2691 case ROTATE:
2692 case ASHIFTRT:
2693 if (trueop1 == CONST0_RTX (mode))
2694 return op0;
2695 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2696 return op0;
2697 /* Rotating ~0 always results in ~0. */
2698 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2699 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2700 && ! side_effects_p (op1))
2701 return op0;
2702 canonicalize_shift:
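/* If the target truncates shift counts to the mode width, reduce an
   out-of-range constant count now; e.g. a 33-bit shift of a 32-bit
   value becomes a 1-bit shift.  */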
2703 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2705 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2706 if (val != INTVAL (op1))
2707 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2709 break;
2711 case ASHIFT:
2712 case SS_ASHIFT:
2713 case US_ASHIFT:
2714 if (trueop1 == CONST0_RTX (mode))
2715 return op0;
2716 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2717 return op0;
2718 goto canonicalize_shift;
2720 case LSHIFTRT:
2721 if (trueop1 == CONST0_RTX (mode))
2722 return op0;
2723 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2724 return op0;
2725 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
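/* For example, for a 32-bit mode where CLZ of zero is defined to be 32,
   (lshiftrt (clz X) (const_int 5)) is 1 exactly when X is zero,
   i.e. (eq X 0).  */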
2726 if (GET_CODE (op0) == CLZ
2727 && CONST_INT_P (trueop1)
2728 && STORE_FLAG_VALUE == 1
2729 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2731 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2732 unsigned HOST_WIDE_INT zero_val = 0;
2734 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2735 && zero_val == GET_MODE_BITSIZE (imode)
2736 && INTVAL (trueop1) == exact_log2 (zero_val))
2737 return simplify_gen_relational (EQ, mode, imode,
2738 XEXP (op0, 0), const0_rtx);
2740 goto canonicalize_shift;
2742 case SMIN:
2743 if (width <= HOST_BITS_PER_WIDE_INT
2744 && CONST_INT_P (trueop1)
2745 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2746 && ! side_effects_p (op0))
2747 return op1;
2748 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2749 return op0;
2750 tem = simplify_associative_operation (code, mode, op0, op1);
2751 if (tem)
2752 return tem;
2753 break;
2755 case SMAX:
2756 if (width <= HOST_BITS_PER_WIDE_INT
2757 && CONST_INT_P (trueop1)
2758 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2759 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2760 && ! side_effects_p (op0))
2761 return op1;
2762 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2763 return op0;
2764 tem = simplify_associative_operation (code, mode, op0, op1);
2765 if (tem)
2766 return tem;
2767 break;
2769 case UMIN:
2770 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2771 return op1;
2772 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2773 return op0;
2774 tem = simplify_associative_operation (code, mode, op0, op1);
2775 if (tem)
2776 return tem;
2777 break;
2779 case UMAX:
2780 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2781 return op1;
2782 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2783 return op0;
2784 tem = simplify_associative_operation (code, mode, op0, op1);
2785 if (tem)
2786 return tem;
2787 break;
2789 case SS_PLUS:
2790 case US_PLUS:
2791 case SS_MINUS:
2792 case US_MINUS:
2793 case SS_MULT:
2794 case US_MULT:
2795 case SS_DIV:
2796 case US_DIV:
2797 /* ??? There are simplifications that can be done. */
2798 return 0;
2800 case VEC_SELECT:
2801 if (!VECTOR_MODE_P (mode))
2803 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2804 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2805 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2806 gcc_assert (XVECLEN (trueop1, 0) == 1);
2807 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2809 if (GET_CODE (trueop0) == CONST_VECTOR)
2810 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2811 (trueop1, 0, 0)));
2813 /* Extract a scalar element from a nested VEC_SELECT expression
2814 (with optional nested VEC_CONCAT expression). Some targets
2815 (i386) extract a scalar element from a vector using a chain of
2816 nested VEC_SELECT expressions. When the input operand is a memory
2817 operand, this operation can be simplified to a simple scalar
2818 load from an offset memory address. */
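/* For example, selecting element 1 from (vec_select:V4SF X (parallel [4 5 6 7]))
   picks element 5 of X, so the nested selection collapses to a single
   (vec_select ... (parallel [5])).  */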
2819 if (GET_CODE (trueop0) == VEC_SELECT)
2821 rtx op0 = XEXP (trueop0, 0);
2822 rtx op1 = XEXP (trueop0, 1);
2824 enum machine_mode opmode = GET_MODE (op0);
2825 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2826 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2828 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2829 int elem;
2831 rtvec vec;
2832 rtx tmp_op, tmp;
2834 gcc_assert (GET_CODE (op1) == PARALLEL);
2835 gcc_assert (i < n_elts);
2837 /* Select the element pointed to by the nested selector. */
2838 elem = INTVAL (XVECEXP (op1, 0, i));
2840 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2841 if (GET_CODE (op0) == VEC_CONCAT)
2843 rtx op00 = XEXP (op0, 0);
2844 rtx op01 = XEXP (op0, 1);
2846 enum machine_mode mode00, mode01;
2847 int n_elts00, n_elts01;
2849 mode00 = GET_MODE (op00);
2850 mode01 = GET_MODE (op01);
2852 /* Find the number of elements in each operand. */
2853 if (VECTOR_MODE_P (mode00))
2855 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2856 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2858 else
2859 n_elts00 = 1;
2861 if (VECTOR_MODE_P (mode01))
2863 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2864 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2866 else
2867 n_elts01 = 1;
2869 gcc_assert (n_elts == n_elts00 + n_elts01);
2871 /* Select the correct operand of the VEC_CONCAT
2872 and adjust the selector. */
2873 if (elem < n_elts01)
2874 tmp_op = op00;
2875 else
2877 tmp_op = op01;
2878 elem -= n_elts00;
2881 else
2882 tmp_op = op0;
2884 vec = rtvec_alloc (1);
2885 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2887 tmp = gen_rtx_fmt_ee (code, mode,
2888 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2889 return tmp;
2892 else
2894 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2895 gcc_assert (GET_MODE_INNER (mode)
2896 == GET_MODE_INNER (GET_MODE (trueop0)));
2897 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2899 if (GET_CODE (trueop0) == CONST_VECTOR)
2901 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2902 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2903 rtvec v = rtvec_alloc (n_elts);
2904 unsigned int i;
2906 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2907 for (i = 0; i < n_elts; i++)
2909 rtx x = XVECEXP (trueop1, 0, i);
2911 gcc_assert (CONST_INT_P (x));
2912 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2913 INTVAL (x));
2916 return gen_rtx_CONST_VECTOR (mode, v);
2920 if (XVECLEN (trueop1, 0) == 1
2921 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2922 && GET_CODE (trueop0) == VEC_CONCAT)
2924 rtx vec = trueop0;
2925 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2927 /* Try to find the element in the VEC_CONCAT. */
2928 while (GET_MODE (vec) != mode
2929 && GET_CODE (vec) == VEC_CONCAT)
2931 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2932 if (offset < vec_size)
2933 vec = XEXP (vec, 0);
2934 else
2936 offset -= vec_size;
2937 vec = XEXP (vec, 1);
2939 vec = avoid_constant_pool_reference (vec);
2942 if (GET_MODE (vec) == mode)
2943 return vec;
2946 return 0;
2947 case VEC_CONCAT:
2949 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2950 ? GET_MODE (trueop0)
2951 : GET_MODE_INNER (mode));
2952 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2953 ? GET_MODE (trueop1)
2954 : GET_MODE_INNER (mode));
2956 gcc_assert (VECTOR_MODE_P (mode));
2957 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2958 == GET_MODE_SIZE (mode));
2960 if (VECTOR_MODE_P (op0_mode))
2961 gcc_assert (GET_MODE_INNER (mode)
2962 == GET_MODE_INNER (op0_mode));
2963 else
2964 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2966 if (VECTOR_MODE_P (op1_mode))
2967 gcc_assert (GET_MODE_INNER (mode)
2968 == GET_MODE_INNER (op1_mode));
2969 else
2970 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2972 if ((GET_CODE (trueop0) == CONST_VECTOR
2973 || CONST_INT_P (trueop0)
2974 || GET_CODE (trueop0) == CONST_DOUBLE)
2975 && (GET_CODE (trueop1) == CONST_VECTOR
2976 || CONST_INT_P (trueop1)
2977 || GET_CODE (trueop1) == CONST_DOUBLE))
2979 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2980 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2981 rtvec v = rtvec_alloc (n_elts);
2982 unsigned int i;
2983 unsigned in_n_elts = 1;
2985 if (VECTOR_MODE_P (op0_mode))
2986 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2987 for (i = 0; i < n_elts; i++)
2989 if (i < in_n_elts)
2991 if (!VECTOR_MODE_P (op0_mode))
2992 RTVEC_ELT (v, i) = trueop0;
2993 else
2994 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2996 else
2998 if (!VECTOR_MODE_P (op1_mode))
2999 RTVEC_ELT (v, i) = trueop1;
3000 else
3001 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3002 i - in_n_elts);
3006 return gen_rtx_CONST_VECTOR (mode, v);
3009 return 0;
3011 default:
3012 gcc_unreachable ();
3015 return 0;
3019 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3020 rtx op0, rtx op1)
3022 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3023 HOST_WIDE_INT val;
3024 unsigned int width = GET_MODE_BITSIZE (mode);
3026 if (VECTOR_MODE_P (mode)
3027 && code != VEC_CONCAT
3028 && GET_CODE (op0) == CONST_VECTOR
3029 && GET_CODE (op1) == CONST_VECTOR)
3031 unsigned n_elts = GET_MODE_NUNITS (mode);
3032 enum machine_mode op0mode = GET_MODE (op0);
3033 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3034 enum machine_mode op1mode = GET_MODE (op1);
3035 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3036 rtvec v = rtvec_alloc (n_elts);
3037 unsigned int i;
3039 gcc_assert (op0_n_elts == n_elts);
3040 gcc_assert (op1_n_elts == n_elts);
3041 for (i = 0; i < n_elts; i++)
3043 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3044 CONST_VECTOR_ELT (op0, i),
3045 CONST_VECTOR_ELT (op1, i));
3046 if (!x)
3047 return 0;
3048 RTVEC_ELT (v, i) = x;
3051 return gen_rtx_CONST_VECTOR (mode, v);
3054 if (VECTOR_MODE_P (mode)
3055 && code == VEC_CONCAT
3056 && (CONST_INT_P (op0)
3057 || GET_CODE (op0) == CONST_DOUBLE
3058 || GET_CODE (op0) == CONST_FIXED)
3059 && (CONST_INT_P (op1)
3060 || GET_CODE (op1) == CONST_DOUBLE
3061 || GET_CODE (op1) == CONST_FIXED))
3063 unsigned n_elts = GET_MODE_NUNITS (mode);
3064 rtvec v = rtvec_alloc (n_elts);
3066 gcc_assert (n_elts >= 2);
3067 if (n_elts == 2)
3069 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3070 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3072 RTVEC_ELT (v, 0) = op0;
3073 RTVEC_ELT (v, 1) = op1;
3075 else
3077 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3078 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3079 unsigned i;
3081 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3082 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3083 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3085 for (i = 0; i < op0_n_elts; ++i)
3086 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3087 for (i = 0; i < op1_n_elts; ++i)
3088 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3091 return gen_rtx_CONST_VECTOR (mode, v);
3094 if (SCALAR_FLOAT_MODE_P (mode)
3095 && GET_CODE (op0) == CONST_DOUBLE
3096 && GET_CODE (op1) == CONST_DOUBLE
3097 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3099 if (code == AND
3100 || code == IOR
3101 || code == XOR)
3103 long tmp0[4];
3104 long tmp1[4];
3105 REAL_VALUE_TYPE r;
3106 int i;
3108 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3109 GET_MODE (op0));
3110 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3111 GET_MODE (op1));
3112 for (i = 0; i < 4; i++)
3114 switch (code)
3116 case AND:
3117 tmp0[i] &= tmp1[i];
3118 break;
3119 case IOR:
3120 tmp0[i] |= tmp1[i];
3121 break;
3122 case XOR:
3123 tmp0[i] ^= tmp1[i];
3124 break;
3125 default:
3126 gcc_unreachable ();
3129 real_from_target (&r, tmp0, mode);
3130 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3132 else
3134 REAL_VALUE_TYPE f0, f1, value, result;
3135 bool inexact;
3137 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3138 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3139 real_convert (&f0, mode, &f0);
3140 real_convert (&f1, mode, &f1);
3142 if (HONOR_SNANS (mode)
3143 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3144 return 0;
3146 if (code == DIV
3147 && REAL_VALUES_EQUAL (f1, dconst0)
3148 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3149 return 0;
3151 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3152 && flag_trapping_math
3153 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3155 int s0 = REAL_VALUE_NEGATIVE (f0);
3156 int s1 = REAL_VALUE_NEGATIVE (f1);
3158 switch (code)
3160 case PLUS:
3161 /* Inf + -Inf = NaN plus exception. */
3162 if (s0 != s1)
3163 return 0;
3164 break;
3165 case MINUS:
3166 /* Inf - Inf = NaN plus exception. */
3167 if (s0 == s1)
3168 return 0;
3169 break;
3170 case DIV:
3171 /* Inf / Inf = NaN plus exception. */
3172 return 0;
3173 default:
3174 break;
3178 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3179 && flag_trapping_math
3180 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3181 || (REAL_VALUE_ISINF (f1)
3182 && REAL_VALUES_EQUAL (f0, dconst0))))
3183 /* Inf * 0 = NaN plus exception. */
3184 return 0;
3186 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3187 &f0, &f1);
3188 real_convert (&result, mode, &value);
3190 /* Don't constant fold this floating point operation if
3191 the result has overflowed and flag_trapping_math is set. */
3193 if (flag_trapping_math
3194 && MODE_HAS_INFINITIES (mode)
3195 && REAL_VALUE_ISINF (result)
3196 && !REAL_VALUE_ISINF (f0)
3197 && !REAL_VALUE_ISINF (f1))
3198 /* Overflow plus exception. */
3199 return 0;
3201 /* Don't constant fold this floating point operation if the
3202 result may depend upon the run-time rounding mode and
3203 flag_rounding_math is set, or if GCC's software emulation
3204 is unable to accurately represent the result. */
3206 if ((flag_rounding_math
3207 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3208 && (inexact || !real_identical (&result, &value)))
3209 return NULL_RTX;
3211 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3215 /* We can fold some multi-word operations. */
3216 if (GET_MODE_CLASS (mode) == MODE_INT
3217 && width == HOST_BITS_PER_WIDE_INT * 2
3218 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3219 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3221 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3222 HOST_WIDE_INT h1, h2, hv, ht;
3224 if (GET_CODE (op0) == CONST_DOUBLE)
3225 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3226 else
3227 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3229 if (GET_CODE (op1) == CONST_DOUBLE)
3230 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3231 else
3232 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3234 switch (code)
3236 case MINUS:
3237 /* A - B == A + (-B). */
3238 neg_double (l2, h2, &lv, &hv);
3239 l2 = lv, h2 = hv;
3241 /* Fall through.... */
3243 case PLUS:
3244 add_double (l1, h1, l2, h2, &lv, &hv);
3245 break;
3247 case MULT:
3248 mul_double (l1, h1, l2, h2, &lv, &hv);
3249 break;
3251 case DIV:
3252 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3253 &lv, &hv, &lt, &ht))
3254 return 0;
3255 break;
3257 case MOD:
3258 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3259 &lt, &ht, &lv, &hv))
3260 return 0;
3261 break;
3263 case UDIV:
3264 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3265 &lv, &hv, &lt, &ht))
3266 return 0;
3267 break;
3269 case UMOD:
3270 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3271 &lt, &ht, &lv, &hv))
3272 return 0;
3273 break;
3275 case AND:
3276 lv = l1 & l2, hv = h1 & h2;
3277 break;
3279 case IOR:
3280 lv = l1 | l2, hv = h1 | h2;
3281 break;
3283 case XOR:
3284 lv = l1 ^ l2, hv = h1 ^ h2;
3285 break;
3287 case SMIN:
3288 if (h1 < h2
3289 || (h1 == h2
3290 && ((unsigned HOST_WIDE_INT) l1
3291 < (unsigned HOST_WIDE_INT) l2)))
3292 lv = l1, hv = h1;
3293 else
3294 lv = l2, hv = h2;
3295 break;
3297 case SMAX:
3298 if (h1 > h2
3299 || (h1 == h2
3300 && ((unsigned HOST_WIDE_INT) l1
3301 > (unsigned HOST_WIDE_INT) l2)))
3302 lv = l1, hv = h1;
3303 else
3304 lv = l2, hv = h2;
3305 break;
3307 case UMIN:
3308 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3309 || (h1 == h2
3310 && ((unsigned HOST_WIDE_INT) l1
3311 < (unsigned HOST_WIDE_INT) l2)))
3312 lv = l1, hv = h1;
3313 else
3314 lv = l2, hv = h2;
3315 break;
3317 case UMAX:
3318 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3319 || (h1 == h2
3320 && ((unsigned HOST_WIDE_INT) l1
3321 > (unsigned HOST_WIDE_INT) l2)))
3322 lv = l1, hv = h1;
3323 else
3324 lv = l2, hv = h2;
3325 break;
3327 case LSHIFTRT: case ASHIFTRT:
3328 case ASHIFT:
3329 case ROTATE: case ROTATERT:
3330 if (SHIFT_COUNT_TRUNCATED)
3331 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3333 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3334 return 0;
3336 if (code == LSHIFTRT || code == ASHIFTRT)
3337 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3338 code == ASHIFTRT);
3339 else if (code == ASHIFT)
3340 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3341 else if (code == ROTATE)
3342 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3343 else /* code == ROTATERT */
3344 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3345 break;
3347 default:
3348 return 0;
3351 return immed_double_const (lv, hv, mode);
3354 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3355 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3357 /* Get the integer argument values in two forms:
3358 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3360 arg0 = INTVAL (op0);
3361 arg1 = INTVAL (op1);
3363 if (width < HOST_BITS_PER_WIDE_INT)
3365 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3366 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3368 arg0s = arg0;
3369 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3370 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3372 arg1s = arg1;
3373 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3374 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3376 else
3378 arg0s = arg0;
3379 arg1s = arg1;
3382 /* Compute the value of the arithmetic. */
3384 switch (code)
3386 case PLUS:
3387 val = arg0s + arg1s;
3388 break;
3390 case MINUS:
3391 val = arg0s - arg1s;
3392 break;
3394 case MULT:
3395 val = arg0s * arg1s;
3396 break;
3398 case DIV:
3399 if (arg1s == 0
3400 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3401 && arg1s == -1))
3402 return 0;
3403 val = arg0s / arg1s;
3404 break;
3406 case MOD:
3407 if (arg1s == 0
3408 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3409 && arg1s == -1))
3410 return 0;
3411 val = arg0s % arg1s;
3412 break;
3414 case UDIV:
3415 if (arg1 == 0
3416 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3417 && arg1s == -1))
3418 return 0;
3419 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3420 break;
3422 case UMOD:
3423 if (arg1 == 0
3424 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3425 && arg1s == -1))
3426 return 0;
3427 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3428 break;
3430 case AND:
3431 val = arg0 & arg1;
3432 break;
3434 case IOR:
3435 val = arg0 | arg1;
3436 break;
3438 case XOR:
3439 val = arg0 ^ arg1;
3440 break;
3442 case LSHIFTRT:
3443 case ASHIFT:
3444 case ASHIFTRT:
3445 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3446 the value is in range. We can't return any old value for
3447 out-of-range arguments because either the middle-end (via
3448 shift_truncation_mask) or the back-end might be relying on
3449 target-specific knowledge. Nor can we rely on
3450 shift_truncation_mask, since the shift might not be part of an
3451 ashlM3, lshrM3 or ashrM3 instruction. */
3452 if (SHIFT_COUNT_TRUNCATED)
3453 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3454 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3455 return 0;
3457 val = (code == ASHIFT
3458 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3459 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3461 /* Sign-extend the result for arithmetic right shifts. */
3462 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3463 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3464 break;
3466 case ROTATERT:
3467 if (arg1 < 0)
3468 return 0;
3470 arg1 %= width;
3471 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3472 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3473 break;
3475 case ROTATE:
3476 if (arg1 < 0)
3477 return 0;
3479 arg1 %= width;
3480 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3481 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3482 break;
3484 case COMPARE:
3485 /* Do nothing here. */
3486 return 0;
3488 case SMIN:
3489 val = arg0s <= arg1s ? arg0s : arg1s;
3490 break;
3492 case UMIN:
3493 val = ((unsigned HOST_WIDE_INT) arg0
3494 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3495 break;
3497 case SMAX:
3498 val = arg0s > arg1s ? arg0s : arg1s;
3499 break;
3501 case UMAX:
3502 val = ((unsigned HOST_WIDE_INT) arg0
3503 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3504 break;
3506 case SS_PLUS:
3507 case US_PLUS:
3508 case SS_MINUS:
3509 case US_MINUS:
3510 case SS_MULT:
3511 case US_MULT:
3512 case SS_DIV:
3513 case US_DIV:
3514 case SS_ASHIFT:
3515 case US_ASHIFT:
3516 /* ??? There are simplifications that can be done. */
3517 return 0;
3519 default:
3520 gcc_unreachable ();
3523 return gen_int_mode (val, mode);
3526 return NULL_RTX;
3531 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3532 PLUS or MINUS.
3534 Rather than test for specific cases, we do this by a brute-force method
3535 and do all possible simplifications until no more changes occur. Then
3536 we rebuild the operation. */
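/* For example, (plus (minus A B) (plus B C)) is roughly expanded into the
   operand list A, -B, B, C; the B terms cancel in the pairwise pass and the
   result is rebuilt as something equivalent to (plus A C).  */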
3538 struct simplify_plus_minus_op_data
3540 rtx op;
3541 short neg;
3544 static bool
3545 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3547 int result;
3549 result = (commutative_operand_precedence (y)
3550 - commutative_operand_precedence (x));
3551 if (result)
3552 return result > 0;
3554 /* Group together equal REGs to do more simplification. */
3555 if (REG_P (x) && REG_P (y))
3556 return REGNO (x) > REGNO (y);
3557 else
3558 return false;
3561 static rtx
3562 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3563 rtx op1)
3565 struct simplify_plus_minus_op_data ops[8];
3566 rtx result, tem;
3567 int n_ops = 2, input_ops = 2;
3568 int changed, n_constants = 0, canonicalized = 0;
3569 int i, j;
3571 memset (ops, 0, sizeof ops);
3573 /* Set up the two operands and then expand them until nothing has been
3574 changed. If we run out of room in our array, give up; this should
3575 almost never happen. */
3577 ops[0].op = op0;
3578 ops[0].neg = 0;
3579 ops[1].op = op1;
3580 ops[1].neg = (code == MINUS);
3584 changed = 0;
3586 for (i = 0; i < n_ops; i++)
3588 rtx this_op = ops[i].op;
3589 int this_neg = ops[i].neg;
3590 enum rtx_code this_code = GET_CODE (this_op);
3592 switch (this_code)
3594 case PLUS:
3595 case MINUS:
3596 if (n_ops == 7)
3597 return NULL_RTX;
3599 ops[n_ops].op = XEXP (this_op, 1);
3600 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3601 n_ops++;
3603 ops[i].op = XEXP (this_op, 0);
3604 input_ops++;
3605 changed = 1;
3606 canonicalized |= this_neg;
3607 break;
3609 case NEG:
3610 ops[i].op = XEXP (this_op, 0);
3611 ops[i].neg = ! this_neg;
3612 changed = 1;
3613 canonicalized = 1;
3614 break;
3616 case CONST:
3617 if (n_ops < 7
3618 && GET_CODE (XEXP (this_op, 0)) == PLUS
3619 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3620 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3622 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3623 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3624 ops[n_ops].neg = this_neg;
3625 n_ops++;
3626 changed = 1;
3627 canonicalized = 1;
3629 break;
3631 case NOT:
3632 /* ~a -> (-a - 1) */
3633 if (n_ops != 7)
3635 ops[n_ops].op = constm1_rtx;
3636 ops[n_ops++].neg = this_neg;
3637 ops[i].op = XEXP (this_op, 0);
3638 ops[i].neg = !this_neg;
3639 changed = 1;
3640 canonicalized = 1;
3642 break;
3644 case CONST_INT:
3645 n_constants++;
3646 if (this_neg)
3648 ops[i].op = neg_const_int (mode, this_op);
3649 ops[i].neg = 0;
3650 changed = 1;
3651 canonicalized = 1;
3653 break;
3655 default:
3656 break;
3660 while (changed);
3662 if (n_constants > 1)
3663 canonicalized = 1;
3665 gcc_assert (n_ops >= 2);
3667 /* If we only have two operands, we can avoid the loops. */
3668 if (n_ops == 2)
3670 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3671 rtx lhs, rhs;
3673 /* Get the two operands. Be careful with the order, especially for
3674 the cases where code == MINUS. */
3675 if (ops[0].neg && ops[1].neg)
3677 lhs = gen_rtx_NEG (mode, ops[0].op);
3678 rhs = ops[1].op;
3680 else if (ops[0].neg)
3682 lhs = ops[1].op;
3683 rhs = ops[0].op;
3685 else
3687 lhs = ops[0].op;
3688 rhs = ops[1].op;
3691 return simplify_const_binary_operation (code, mode, lhs, rhs);
3694 /* Now simplify each pair of operands until nothing changes. */
3697 /* Insertion sort is good enough for an eight-element array. */
3698 for (i = 1; i < n_ops; i++)
3700 struct simplify_plus_minus_op_data save;
3701 j = i - 1;
3702 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3703 continue;
3705 canonicalized = 1;
3706 save = ops[i];
3708 ops[j + 1] = ops[j];
3709 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3710 ops[j + 1] = save;
3713 changed = 0;
3714 for (i = n_ops - 1; i > 0; i--)
3715 for (j = i - 1; j >= 0; j--)
3717 rtx lhs = ops[j].op, rhs = ops[i].op;
3718 int lneg = ops[j].neg, rneg = ops[i].neg;
3720 if (lhs != 0 && rhs != 0)
3722 enum rtx_code ncode = PLUS;
3724 if (lneg != rneg)
3726 ncode = MINUS;
3727 if (lneg)
3728 tem = lhs, lhs = rhs, rhs = tem;
3730 else if (swap_commutative_operands_p (lhs, rhs))
3731 tem = lhs, lhs = rhs, rhs = tem;
3733 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3734 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3736 rtx tem_lhs, tem_rhs;
3738 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3739 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3740 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3742 if (tem && !CONSTANT_P (tem))
3743 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3745 else
3746 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3748 /* Reject "simplifications" that just wrap the two
3749 arguments in a CONST. Failure to do so can result
3750 in infinite recursion with simplify_binary_operation
3751 when it calls us to simplify CONST operations. */
3752 if (tem
3753 && ! (GET_CODE (tem) == CONST
3754 && GET_CODE (XEXP (tem, 0)) == ncode
3755 && XEXP (XEXP (tem, 0), 0) == lhs
3756 && XEXP (XEXP (tem, 0), 1) == rhs))
3758 lneg &= rneg;
3759 if (GET_CODE (tem) == NEG)
3760 tem = XEXP (tem, 0), lneg = !lneg;
3761 if (CONST_INT_P (tem) && lneg)
3762 tem = neg_const_int (mode, tem), lneg = 0;
3764 ops[i].op = tem;
3765 ops[i].neg = lneg;
3766 ops[j].op = NULL_RTX;
3767 changed = 1;
3768 canonicalized = 1;
3773 /* If nothing changed, fail. */
3774 if (!canonicalized)
3775 return NULL_RTX;
3777 /* Pack all the operands to the lower-numbered entries. */
3778 for (i = 0, j = 0; j < n_ops; j++)
3779 if (ops[j].op)
3781 ops[i] = ops[j];
3782 i++;
3784 n_ops = i;
3786 while (changed);
3788 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3789 if (n_ops == 2
3790 && CONST_INT_P (ops[1].op)
3791 && CONSTANT_P (ops[0].op)
3792 && ops[0].neg)
3793 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3795 /* We suppressed creation of trivial CONST expressions in the
3796 combination loop to avoid recursion. Create one manually now.
3797 The combination loop should have ensured that there is exactly
3798 one CONST_INT, and the sort will have ensured that it is last
3799 in the array and that any other constant will be next-to-last. */
3801 if (n_ops > 1
3802 && CONST_INT_P (ops[n_ops - 1].op)
3803 && CONSTANT_P (ops[n_ops - 2].op))
3805 rtx value = ops[n_ops - 1].op;
3806 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3807 value = neg_const_int (mode, value);
3808 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3809 n_ops--;
3812 /* Put a non-negated operand first, if possible. */
3814 for (i = 0; i < n_ops && ops[i].neg; i++)
3815 continue;
3816 if (i == n_ops)
3817 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3818 else if (i != 0)
3820 tem = ops[0].op;
3821 ops[0] = ops[i];
3822 ops[i].op = tem;
3823 ops[i].neg = 1;
3826 /* Now make the result by performing the requested operations. */
3827 result = ops[0].op;
3828 for (i = 1; i < n_ops; i++)
3829 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3830 mode, result, ops[i].op);
3832 return result;
3835 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3836 static bool
3837 plus_minus_operand_p (const_rtx x)
3839 return GET_CODE (x) == PLUS
3840 || GET_CODE (x) == MINUS
3841 || (GET_CODE (x) == CONST
3842 && GET_CODE (XEXP (x, 0)) == PLUS
3843 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3844 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3847 /* Like simplify_binary_operation except used for relational operators.
3848 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3849 not also be VOIDmode.
3851 CMP_MODE specifies the mode in which the comparison is done, so it is
3852 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3853 the operands or, if both are VOIDmode, the operands are compared in
3854 "infinite precision". */
3856 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3857 enum machine_mode cmp_mode, rtx op0, rtx op1)
3859 rtx tem, trueop0, trueop1;
3861 if (cmp_mode == VOIDmode)
3862 cmp_mode = GET_MODE (op0);
3863 if (cmp_mode == VOIDmode)
3864 cmp_mode = GET_MODE (op1);
3866 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3867 if (tem)
3869 if (SCALAR_FLOAT_MODE_P (mode))
3871 if (tem == const0_rtx)
3872 return CONST0_RTX (mode);
3873 #ifdef FLOAT_STORE_FLAG_VALUE
3875 REAL_VALUE_TYPE val;
3876 val = FLOAT_STORE_FLAG_VALUE (mode);
3877 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3879 #else
3880 return NULL_RTX;
3881 #endif
3883 if (VECTOR_MODE_P (mode))
3885 if (tem == const0_rtx)
3886 return CONST0_RTX (mode);
3887 #ifdef VECTOR_STORE_FLAG_VALUE
3889 int i, units;
3890 rtvec v;
3892 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3893 if (val == NULL_RTX)
3894 return NULL_RTX;
3895 if (val == const1_rtx)
3896 return CONST1_RTX (mode);
3898 units = GET_MODE_NUNITS (mode);
3899 v = rtvec_alloc (units);
3900 for (i = 0; i < units; i++)
3901 RTVEC_ELT (v, i) = val;
3902 return gen_rtx_raw_CONST_VECTOR (mode, v);
3904 #else
3905 return NULL_RTX;
3906 #endif
3909 return tem;
3912 /* For the following tests, ensure const0_rtx is op1. */
3913 if (swap_commutative_operands_p (op0, op1)
3914 || (op0 == const0_rtx && op1 != const0_rtx))
3915 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3917 /* If op0 is a compare, extract the comparison arguments from it. */
3918 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3919 return simplify_gen_relational (code, mode, VOIDmode,
3920 XEXP (op0, 0), XEXP (op0, 1));
3922 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3923 || CC0_P (op0))
3924 return NULL_RTX;
3926 trueop0 = avoid_constant_pool_reference (op0);
3927 trueop1 = avoid_constant_pool_reference (op1);
3928 return simplify_relational_operation_1 (code, mode, cmp_mode,
3929 trueop0, trueop1);
3932 /* This part of simplify_relational_operation is only used when CMP_MODE
3933 is not in class MODE_CC (i.e. it is a real comparison).
3935 MODE is the mode of the result, while CMP_MODE specifies the mode
3936 in which the comparison is done, so it is the mode of the operands. */
3938 static rtx
3939 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3940 enum machine_mode cmp_mode, rtx op0, rtx op1)
3942 enum rtx_code op0code = GET_CODE (op0);
3944 if (op1 == const0_rtx && COMPARISON_P (op0))
3946 /* If op0 is a comparison, extract the comparison arguments
3947 from it. */
3948 if (code == NE)
3950 if (GET_MODE (op0) == mode)
3951 return simplify_rtx (op0);
3952 else
3953 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3954 XEXP (op0, 0), XEXP (op0, 1));
3956 else if (code == EQ)
3958 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3959 if (new_code != UNKNOWN)
3960 return simplify_gen_relational (new_code, mode, VOIDmode,
3961 XEXP (op0, 0), XEXP (op0, 1));
3965 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3966 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
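/* For example, (ltu (plus A (const_int 4)) (const_int 4)) becomes
   (geu A (const_int -4)), in effect a test that the addition wrapped around.  */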
3967 if ((code == LTU || code == GEU)
3968 && GET_CODE (op0) == PLUS
3969 && CONST_INT_P (XEXP (op0, 1))
3970 && (rtx_equal_p (op1, XEXP (op0, 0))
3971 || rtx_equal_p (op1, XEXP (op0, 1))))
3973 rtx new_cmp
3974 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
3975 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
3976 cmp_mode, XEXP (op0, 0), new_cmp);
3979 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3980 if ((code == LTU || code == GEU)
3981 && GET_CODE (op0) == PLUS
3982 && rtx_equal_p (op1, XEXP (op0, 1))
3983 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3984 && !rtx_equal_p (op1, XEXP (op0, 0)))
3985 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
3987 if (op1 == const0_rtx)
3989 /* Canonicalize (GTU x 0) as (NE x 0). */
3990 if (code == GTU)
3991 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3992 /* Canonicalize (LEU x 0) as (EQ x 0). */
3993 if (code == LEU)
3994 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3996 else if (op1 == const1_rtx)
3998 switch (code)
4000 case GE:
4001 /* Canonicalize (GE x 1) as (GT x 0). */
4002 return simplify_gen_relational (GT, mode, cmp_mode,
4003 op0, const0_rtx);
4004 case GEU:
4005 /* Canonicalize (GEU x 1) as (NE x 0). */
4006 return simplify_gen_relational (NE, mode, cmp_mode,
4007 op0, const0_rtx);
4008 case LT:
4009 /* Canonicalize (LT x 1) as (LE x 0). */
4010 return simplify_gen_relational (LE, mode, cmp_mode,
4011 op0, const0_rtx);
4012 case LTU:
4013 /* Canonicalize (LTU x 1) as (EQ x 0). */
4014 return simplify_gen_relational (EQ, mode, cmp_mode,
4015 op0, const0_rtx);
4016 default:
4017 break;
4020 else if (op1 == constm1_rtx)
4022 /* Canonicalize (LE x -1) as (LT x 0). */
4023 if (code == LE)
4024 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4025 /* Canonicalize (GT x -1) as (GE x 0). */
4026 if (code == GT)
4027 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4030 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
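/* For instance, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)). */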
4031 if ((code == EQ || code == NE)
4032 && (op0code == PLUS || op0code == MINUS)
4033 && CONSTANT_P (op1)
4034 && CONSTANT_P (XEXP (op0, 1))
4035 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4037 rtx x = XEXP (op0, 0);
4038 rtx c = XEXP (op0, 1);
4040 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4041 cmp_mode, op1, c);
4042 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4045 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4046 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4047 if (code == NE
4048 && op1 == const0_rtx
4049 && GET_MODE_CLASS (mode) == MODE_INT
4050 && cmp_mode != VOIDmode
4051 /* ??? Work-around BImode bugs in the ia64 backend. */
4052 && mode != BImode
4053 && cmp_mode != BImode
4054 && nonzero_bits (op0, cmp_mode) == 1
4055 && STORE_FLAG_VALUE == 1)
4056 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4057 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4058 : lowpart_subreg (mode, op0, cmp_mode);
4060 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4061 if ((code == EQ || code == NE)
4062 && op1 == const0_rtx
4063 && op0code == XOR)
4064 return simplify_gen_relational (code, mode, cmp_mode,
4065 XEXP (op0, 0), XEXP (op0, 1));
4067 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4068 if ((code == EQ || code == NE)
4069 && op0code == XOR
4070 && rtx_equal_p (XEXP (op0, 0), op1)
4071 && !side_effects_p (XEXP (op0, 0)))
4072 return simplify_gen_relational (code, mode, cmp_mode,
4073 XEXP (op0, 1), const0_rtx);
4075 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4076 if ((code == EQ || code == NE)
4077 && op0code == XOR
4078 && rtx_equal_p (XEXP (op0, 1), op1)
4079 && !side_effects_p (XEXP (op0, 1)))
4080 return simplify_gen_relational (code, mode, cmp_mode,
4081 XEXP (op0, 0), const0_rtx);
4083 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4084 if ((code == EQ || code == NE)
4085 && op0code == XOR
4086 && (CONST_INT_P (op1)
4087 || GET_CODE (op1) == CONST_DOUBLE)
4088 && (CONST_INT_P (XEXP (op0, 1))
4089 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4090 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4091 simplify_gen_binary (XOR, cmp_mode,
4092 XEXP (op0, 1), op1));
4094 if (op0code == POPCOUNT && op1 == const0_rtx)
4095 switch (code)
4097 case EQ:
4098 case LE:
4099 case LEU:
4100 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4101 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4102 XEXP (op0, 0), const0_rtx);
4104 case NE:
4105 case GT:
4106 case GTU:
4107 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4108 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4109 XEXP (op0, 0), const0_rtx);
4111 default:
4112 break;
4115 return NULL_RTX;
4118 enum
4120 CMP_EQ = 1,
4121 CMP_LT = 2,
4122 CMP_GT = 4,
4123 CMP_LTU = 8,
4124 CMP_GTU = 16
4128 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4129 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4130 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4131 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4132 For floating-point comparisons, assume that the operands were ordered. */
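/* For example, comparing the signed value -1 against 0 gives
   (CMP_LT | CMP_GTU): less than when viewed as signed, greater than
   when viewed as unsigned. */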
4134 static rtx
4135 comparison_result (enum rtx_code code, int known_results)
4137 switch (code)
4139 case EQ:
4140 case UNEQ:
4141 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4142 case NE:
4143 case LTGT:
4144 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4146 case LT:
4147 case UNLT:
4148 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4149 case GE:
4150 case UNGE:
4151 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4153 case GT:
4154 case UNGT:
4155 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4156 case LE:
4157 case UNLE:
4158 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4160 case LTU:
4161 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4162 case GEU:
4163 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4165 case GTU:
4166 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4167 case LEU:
4168 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4170 case ORDERED:
4171 return const_true_rtx;
4172 case UNORDERED:
4173 return const0_rtx;
4174 default:
4175 gcc_unreachable ();
4179 /* Check if the given comparison (done in the given MODE) is actually a
4180 tautology or a contradiction.
4181 If no simplification is possible, this function returns zero.
4182 Otherwise, it returns either const_true_rtx or const0_rtx. */
4185 simplify_const_relational_operation (enum rtx_code code,
4186 enum machine_mode mode,
4187 rtx op0, rtx op1)
4189 rtx tem;
4190 rtx trueop0;
4191 rtx trueop1;
4193 gcc_assert (mode != VOIDmode
4194 || (GET_MODE (op0) == VOIDmode
4195 && GET_MODE (op1) == VOIDmode));
4197 /* If op0 is a compare, extract the comparison arguments from it. */
4198 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4200 op1 = XEXP (op0, 1);
4201 op0 = XEXP (op0, 0);
4203 if (GET_MODE (op0) != VOIDmode)
4204 mode = GET_MODE (op0);
4205 else if (GET_MODE (op1) != VOIDmode)
4206 mode = GET_MODE (op1);
4207 else
4208 return 0;
4211 /* We can't simplify MODE_CC values since we don't know what the
4212 actual comparison is. */
4213 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4214 return 0;
4216 /* Make sure the constant is second. */
4217 if (swap_commutative_operands_p (op0, op1))
4219 tem = op0, op0 = op1, op1 = tem;
4220 code = swap_condition (code);
4223 trueop0 = avoid_constant_pool_reference (op0);
4224 trueop1 = avoid_constant_pool_reference (op1);
4226 /* For integer comparisons of A and B maybe we can simplify A - B and can
4227 then simplify a comparison of that with zero. If A and B are both either
4228 a register or a CONST_INT, this can't help; testing for these cases will
4229 prevent infinite recursion here and speed things up.
4231 We can only do this for EQ and NE comparisons; otherwise we may
4232 lose or introduce overflow, and we cannot disregard that overflow as
4233 undefined because we do not know the signedness of the operation on
4234 either the left or the right hand side of the comparison. */
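/* For example, (eq (plus x (const_int 1)) (plus x (const_int 2)))
   can reduce via (minus ...) = (const_int -1) to const0_rtx. */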
4236 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4237 && (code == EQ || code == NE)
4238 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4239 && (REG_P (op1) || CONST_INT_P (trueop1)))
4240 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4241 /* We cannot do this if tem is a nonzero address. */
4242 && ! nonzero_address_p (tem))
4243 return simplify_const_relational_operation (signed_condition (code),
4244 mode, tem, const0_rtx);
4246 if (! HONOR_NANS (mode) && code == ORDERED)
4247 return const_true_rtx;
4249 if (! HONOR_NANS (mode) && code == UNORDERED)
4250 return const0_rtx;
4252 /* For modes without NaNs, if the two operands are equal, we know the
4253 result except if they have side-effects. Even with NaNs we know
4254 the result of unordered comparisons and, if signaling NaNs are
4255 irrelevant, also the result of LT/GT/LTGT. */
4256 if ((! HONOR_NANS (GET_MODE (trueop0))
4257 || code == UNEQ || code == UNLE || code == UNGE
4258 || ((code == LT || code == GT || code == LTGT)
4259 && ! HONOR_SNANS (GET_MODE (trueop0))))
4260 && rtx_equal_p (trueop0, trueop1)
4261 && ! side_effects_p (trueop0))
4262 return comparison_result (code, CMP_EQ);
4264 /* If the operands are floating-point constants, see if we can fold
4265 the result. */
4266 if (GET_CODE (trueop0) == CONST_DOUBLE
4267 && GET_CODE (trueop1) == CONST_DOUBLE
4268 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4270 REAL_VALUE_TYPE d0, d1;
4272 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4273 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4275 /* Comparisons are unordered iff at least one of the values is NaN. */
4276 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4277 switch (code)
4279 case UNEQ:
4280 case UNLT:
4281 case UNGT:
4282 case UNLE:
4283 case UNGE:
4284 case NE:
4285 case UNORDERED:
4286 return const_true_rtx;
4287 case EQ:
4288 case LT:
4289 case GT:
4290 case LE:
4291 case GE:
4292 case LTGT:
4293 case ORDERED:
4294 return const0_rtx;
4295 default:
4296 return 0;
4299 return comparison_result (code,
4300 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4301 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4304 /* Otherwise, see if the operands are both integers. */
4305 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4306 && (GET_CODE (trueop0) == CONST_DOUBLE
4307 || CONST_INT_P (trueop0))
4308 && (GET_CODE (trueop1) == CONST_DOUBLE
4309 || CONST_INT_P (trueop1)))
4311 int width = GET_MODE_BITSIZE (mode);
4312 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4313 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4315 /* Get the two words comprising each integer constant. */
4316 if (GET_CODE (trueop0) == CONST_DOUBLE)
4318 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4319 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4321 else
4323 l0u = l0s = INTVAL (trueop0);
4324 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4327 if (GET_CODE (trueop1) == CONST_DOUBLE)
4329 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4330 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4332 else
4334 l1u = l1s = INTVAL (trueop1);
4335 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4338 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4339 we have to sign or zero-extend the values. */
4340 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4342 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4343 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4345 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4346 l0s |= ((HOST_WIDE_INT) (-1) << width);
4348 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4349 l1s |= ((HOST_WIDE_INT) (-1) << width);
4351 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4352 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4354 if (h0u == h1u && l0u == l1u)
4355 return comparison_result (code, CMP_EQ);
4356 else
4358 int cr;
4359 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4360 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4361 return comparison_result (code, cr);
4365 /* Optimize comparisons with upper and lower bounds. */
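/* For instance, if nonzero_bits shows that only the low 8 bits of a
   wider-mode value x can be set, then (gtu x (const_int 255)) is known
   to be false and (leu x (const_int 255)) is known to be true. */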
4366 if (SCALAR_INT_MODE_P (mode)
4367 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4368 && CONST_INT_P (trueop1))
4370 int sign;
4371 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4372 HOST_WIDE_INT val = INTVAL (trueop1);
4373 HOST_WIDE_INT mmin, mmax;
4375 if (code == GEU
4376 || code == LEU
4377 || code == GTU
4378 || code == LTU)
4379 sign = 0;
4380 else
4381 sign = 1;
4383 /* Get a reduced range if the sign bit is zero. */
4384 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4386 mmin = 0;
4387 mmax = nonzero;
4389 else
4391 rtx mmin_rtx, mmax_rtx;
4392 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4394 mmin = INTVAL (mmin_rtx);
4395 mmax = INTVAL (mmax_rtx);
4396 if (sign)
4398 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4400 mmin >>= (sign_copies - 1);
4401 mmax >>= (sign_copies - 1);
4405 switch (code)
4407 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4408 case GEU:
4409 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4410 return const_true_rtx;
4411 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4412 return const0_rtx;
4413 break;
4414 case GE:
4415 if (val <= mmin)
4416 return const_true_rtx;
4417 if (val > mmax)
4418 return const0_rtx;
4419 break;
4421 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4422 case LEU:
4423 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4424 return const_true_rtx;
4425 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4426 return const0_rtx;
4427 break;
4428 case LE:
4429 if (val >= mmax)
4430 return const_true_rtx;
4431 if (val < mmin)
4432 return const0_rtx;
4433 break;
4435 case EQ:
4436 /* x == y is always false for y out of range. */
4437 if (val < mmin || val > mmax)
4438 return const0_rtx;
4439 break;
4441 /* x > y is always false for y >= mmax, always true for y < mmin. */
4442 case GTU:
4443 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4444 return const0_rtx;
4445 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4446 return const_true_rtx;
4447 break;
4448 case GT:
4449 if (val >= mmax)
4450 return const0_rtx;
4451 if (val < mmin)
4452 return const_true_rtx;
4453 break;
4455 /* x < y is always false for y <= mmin, always true for y > mmax. */
4456 case LTU:
4457 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4458 return const0_rtx;
4459 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4460 return const_true_rtx;
4461 break;
4462 case LT:
4463 if (val <= mmin)
4464 return const0_rtx;
4465 if (val > mmax)
4466 return const_true_rtx;
4467 break;
4469 case NE:
4470 /* x != y is always true for y out of range. */
4471 if (val < mmin || val > mmax)
4472 return const_true_rtx;
4473 break;
4475 default:
4476 break;
4480 /* Optimize integer comparisons with zero. */
4481 if (trueop1 == const0_rtx)
4483 /* Some addresses are known to be nonzero. We don't know
4484 their sign, but equality comparisons are known. */
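/* E.g. the address of a non-weak declared object is normally known to
   be nonzero, so (eq (symbol_ref "foo") (const_int 0)) can fold to
   const0_rtx. */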
4485 if (nonzero_address_p (trueop0))
4487 if (code == EQ || code == LEU)
4488 return const0_rtx;
4489 if (code == NE || code == GTU)
4490 return const_true_rtx;
4493 /* See if the first operand is an IOR with a constant. If so, we
4494 may be able to determine the result of this comparison. */
4495 if (GET_CODE (op0) == IOR)
4497 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4498 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4500 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4501 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4502 && (INTVAL (inner_const)
4503 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4505 switch (code)
4507 case EQ:
4508 case LEU:
4509 return const0_rtx;
4510 case NE:
4511 case GTU:
4512 return const_true_rtx;
4513 case LT:
4514 case LE:
4515 if (has_sign)
4516 return const_true_rtx;
4517 break;
4518 case GT:
4519 case GE:
4520 if (has_sign)
4521 return const0_rtx;
4522 break;
4523 default:
4524 break;
4530 /* Optimize comparison of ABS with zero. */
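/* For signed integer modes, abs (x) is negative only when x is the
   minimum value and the negation wraps; under -fstrict-overflow (and
   without -fwrapv or -ftrapv) we may assume that does not happen. */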
4531 if (trueop1 == CONST0_RTX (mode)
4532 && (GET_CODE (trueop0) == ABS
4533 || (GET_CODE (trueop0) == FLOAT_EXTEND
4534 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4536 switch (code)
4538 case LT:
4539 /* Optimize abs(x) < 0.0. */
4540 if (!HONOR_SNANS (mode)
4541 && (!INTEGRAL_MODE_P (mode)
4542 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4544 if (INTEGRAL_MODE_P (mode)
4545 && (issue_strict_overflow_warning
4546 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4547 warning (OPT_Wstrict_overflow,
4548 ("assuming signed overflow does not occur when "
4549 "assuming abs (x) < 0 is false"));
4550 return const0_rtx;
4552 break;
4554 case GE:
4555 /* Optimize abs(x) >= 0.0. */
4556 if (!HONOR_NANS (mode)
4557 && (!INTEGRAL_MODE_P (mode)
4558 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4560 if (INTEGRAL_MODE_P (mode)
4561 && (issue_strict_overflow_warning
4562 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4563 warning (OPT_Wstrict_overflow,
4564 ("assuming signed overflow does not occur when "
4565 "assuming abs (x) >= 0 is true"));
4566 return const_true_rtx;
4568 break;
4570 case UNGE:
4571 /* Optimize ! (abs(x) < 0.0). */
4572 return const_true_rtx;
4574 default:
4575 break;
4579 return 0;
4582 /* Simplify CODE, an operation with result mode MODE and three operands,
4583 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4584 a constant. Return 0 if no simplification is possible. */
4587 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4588 enum machine_mode op0_mode, rtx op0, rtx op1,
4589 rtx op2)
4591 unsigned int width = GET_MODE_BITSIZE (mode);
4593 /* VOIDmode means "infinite" precision. */
4594 if (width == 0)
4595 width = HOST_BITS_PER_WIDE_INT;
4597 switch (code)
4599 case SIGN_EXTRACT:
4600 case ZERO_EXTRACT:
4601 if (CONST_INT_P (op0)
4602 && CONST_INT_P (op1)
4603 && CONST_INT_P (op2)
4604 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4605 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4607 /* Extracting a bit-field from a constant */
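/* E.g. with little-endian bit numbering (!BITS_BIG_ENDIAN),
   (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   evaluates to (const_int 0xa). */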
4608 HOST_WIDE_INT val = INTVAL (op0);
4610 if (BITS_BIG_ENDIAN)
4611 val >>= (GET_MODE_BITSIZE (op0_mode)
4612 - INTVAL (op2) - INTVAL (op1));
4613 else
4614 val >>= INTVAL (op2);
4616 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4618 /* First zero-extend. */
4619 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4620 /* If desired, propagate sign bit. */
4621 if (code == SIGN_EXTRACT
4622 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4623 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4626 /* Clear the bits that don't belong in our mode,
4627 unless they and our sign bit are all one.
4628 So we get either a reasonable negative value or a reasonable
4629 unsigned value for this mode. */
4630 if (width < HOST_BITS_PER_WIDE_INT
4631 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4632 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4633 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4635 return gen_int_mode (val, mode);
4637 break;
4639 case IF_THEN_ELSE:
4640 if (CONST_INT_P (op0))
4641 return op0 != const0_rtx ? op1 : op2;
4643 /* Convert c ? a : a into "a". */
4644 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4645 return op1;
4647 /* Convert a != b ? a : b into "a". */
4648 if (GET_CODE (op0) == NE
4649 && ! side_effects_p (op0)
4650 && ! HONOR_NANS (mode)
4651 && ! HONOR_SIGNED_ZEROS (mode)
4652 && ((rtx_equal_p (XEXP (op0, 0), op1)
4653 && rtx_equal_p (XEXP (op0, 1), op2))
4654 || (rtx_equal_p (XEXP (op0, 0), op2)
4655 && rtx_equal_p (XEXP (op0, 1), op1))))
4656 return op1;
4658 /* Convert a == b ? a : b into "b". */
4659 if (GET_CODE (op0) == EQ
4660 && ! side_effects_p (op0)
4661 && ! HONOR_NANS (mode)
4662 && ! HONOR_SIGNED_ZEROS (mode)
4663 && ((rtx_equal_p (XEXP (op0, 0), op1)
4664 && rtx_equal_p (XEXP (op0, 1), op2))
4665 || (rtx_equal_p (XEXP (op0, 0), op2)
4666 && rtx_equal_p (XEXP (op0, 1), op1))))
4667 return op2;
4669 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4671 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4672 ? GET_MODE (XEXP (op0, 1))
4673 : GET_MODE (XEXP (op0, 0)));
4674 rtx temp;
4676 /* Look for happy constants in op1 and op2. */
4677 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4679 HOST_WIDE_INT t = INTVAL (op1);
4680 HOST_WIDE_INT f = INTVAL (op2);
4682 if (t == STORE_FLAG_VALUE && f == 0)
4683 code = GET_CODE (op0);
4684 else if (t == 0 && f == STORE_FLAG_VALUE)
4686 enum rtx_code tmp;
4687 tmp = reversed_comparison_code (op0, NULL_RTX);
4688 if (tmp == UNKNOWN)
4689 break;
4690 code = tmp;
4692 else
4693 break;
4695 return simplify_gen_relational (code, mode, cmp_mode,
4696 XEXP (op0, 0), XEXP (op0, 1));
4699 if (cmp_mode == VOIDmode)
4700 cmp_mode = op0_mode;
4701 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4702 cmp_mode, XEXP (op0, 0),
4703 XEXP (op0, 1));
4705 /* See if any simplifications were possible. */
4706 if (temp)
4708 if (CONST_INT_P (temp))
4709 return temp == const0_rtx ? op2 : op1;
4710 else if (temp)
4711 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4714 break;
4716 case VEC_MERGE:
4717 gcc_assert (GET_MODE (op0) == mode);
4718 gcc_assert (GET_MODE (op1) == mode);
4719 gcc_assert (VECTOR_MODE_P (mode));
4720 op2 = avoid_constant_pool_reference (op2);
4721 if (CONST_INT_P (op2))
4723 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4724 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4725 int mask = (1 << n_elts) - 1;
4727 if (!(INTVAL (op2) & mask))
4728 return op1;
4729 if ((INTVAL (op2) & mask) == mask)
4730 return op0;
4732 op0 = avoid_constant_pool_reference (op0);
4733 op1 = avoid_constant_pool_reference (op1);
4734 if (GET_CODE (op0) == CONST_VECTOR
4735 && GET_CODE (op1) == CONST_VECTOR)
4737 rtvec v = rtvec_alloc (n_elts);
4738 unsigned int i;
4740 for (i = 0; i < n_elts; i++)
4741 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4742 ? CONST_VECTOR_ELT (op0, i)
4743 : CONST_VECTOR_ELT (op1, i));
4744 return gen_rtx_CONST_VECTOR (mode, v);
4747 break;
4749 default:
4750 gcc_unreachable ();
4753 return 0;
4756 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4757 or CONST_VECTOR,
4758 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4760 Works by unpacking OP into a collection of 8-bit values
4761 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4762 and then repacking them again for OUTERMODE. */
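/* E.g. on a little-endian target, a QImode subreg of (const_int 0x1234)
   at byte 0 evaluates to (const_int 0x34). */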
4764 static rtx
4765 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4766 enum machine_mode innermode, unsigned int byte)
4768 /* We support up to 512-bit values (for V8DFmode). */
4769 enum {
4770 max_bitsize = 512,
4771 value_bit = 8,
4772 value_mask = (1 << value_bit) - 1
4774 unsigned char value[max_bitsize / value_bit];
4775 int value_start;
4776 int i;
4777 int elem;
4779 int num_elem;
4780 rtx * elems;
4781 int elem_bitsize;
4782 rtx result_s;
4783 rtvec result_v = NULL;
4784 enum mode_class outer_class;
4785 enum machine_mode outer_submode;
4787 /* Some ports misuse CCmode. */
4788 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4789 return op;
4791 /* We have no way to represent a complex constant at the rtl level. */
4792 if (COMPLEX_MODE_P (outermode))
4793 return NULL_RTX;
4795 /* Unpack the value. */
4797 if (GET_CODE (op) == CONST_VECTOR)
4799 num_elem = CONST_VECTOR_NUNITS (op);
4800 elems = &CONST_VECTOR_ELT (op, 0);
4801 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4803 else
4805 num_elem = 1;
4806 elems = &op;
4807 elem_bitsize = max_bitsize;
4809 /* If this asserts, it is too complicated; reducing value_bit may help. */
4810 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4811 /* I don't know how to handle endianness of sub-units. */
4812 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4814 for (elem = 0; elem < num_elem; elem++)
4816 unsigned char * vp;
4817 rtx el = elems[elem];
4819 /* Vectors are kept in target memory order. (This is probably
4820 a mistake.) */
4822 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4823 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4824 / BITS_PER_UNIT);
4825 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4826 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4827 unsigned bytele = (subword_byte % UNITS_PER_WORD
4828 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4829 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4832 switch (GET_CODE (el))
4834 case CONST_INT:
4835 for (i = 0;
4836 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4837 i += value_bit)
4838 *vp++ = INTVAL (el) >> i;
4839 /* CONST_INTs are always logically sign-extended. */
4840 for (; i < elem_bitsize; i += value_bit)
4841 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4842 break;
4844 case CONST_DOUBLE:
4845 if (GET_MODE (el) == VOIDmode)
4847 /* If this triggers, someone should have generated a
4848 CONST_INT instead. */
4849 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4851 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4852 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4853 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4855 *vp++
4856 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4857 i += value_bit;
4859 /* It shouldn't matter what's done here, so fill it with
4860 zero. */
4861 for (; i < elem_bitsize; i += value_bit)
4862 *vp++ = 0;
4864 else
4866 long tmp[max_bitsize / 32];
4867 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4869 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4870 gcc_assert (bitsize <= elem_bitsize);
4871 gcc_assert (bitsize % value_bit == 0);
4873 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4874 GET_MODE (el));
4876 /* real_to_target produces its result in words affected by
4877 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4878 and use WORDS_BIG_ENDIAN instead; see the documentation
4879 of SUBREG in rtl.texi. */
4880 for (i = 0; i < bitsize; i += value_bit)
4882 int ibase;
4883 if (WORDS_BIG_ENDIAN)
4884 ibase = bitsize - 1 - i;
4885 else
4886 ibase = i;
4887 *vp++ = tmp[ibase / 32] >> i % 32;
4890 /* It shouldn't matter what's done here, so fill it with
4891 zero. */
4892 for (; i < elem_bitsize; i += value_bit)
4893 *vp++ = 0;
4895 break;
4897 case CONST_FIXED:
4898 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4900 for (i = 0; i < elem_bitsize; i += value_bit)
4901 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4903 else
4905 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4906 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4907 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4908 i += value_bit)
4909 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4910 >> (i - HOST_BITS_PER_WIDE_INT);
4911 for (; i < elem_bitsize; i += value_bit)
4912 *vp++ = 0;
4914 break;
4916 default:
4917 gcc_unreachable ();
4921 /* Now, pick the right byte to start with. */
4922 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4923 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4924 will already have offset 0. */
4925 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4927 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4928 - byte);
4929 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4930 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4931 byte = (subword_byte % UNITS_PER_WORD
4932 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4935 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4936 so if it's become negative it will instead be very large.) */
4937 gcc_assert (byte < GET_MODE_SIZE (innermode));
4939 /* Convert from bytes to chunks of size value_bit. */
4940 value_start = byte * (BITS_PER_UNIT / value_bit);
4942 /* Re-pack the value. */
4944 if (VECTOR_MODE_P (outermode))
4946 num_elem = GET_MODE_NUNITS (outermode);
4947 result_v = rtvec_alloc (num_elem);
4948 elems = &RTVEC_ELT (result_v, 0);
4949 outer_submode = GET_MODE_INNER (outermode);
4951 else
4953 num_elem = 1;
4954 elems = &result_s;
4955 outer_submode = outermode;
4958 outer_class = GET_MODE_CLASS (outer_submode);
4959 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4961 gcc_assert (elem_bitsize % value_bit == 0);
4962 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4964 for (elem = 0; elem < num_elem; elem++)
4966 unsigned char *vp;
4968 /* Vectors are stored in target memory order. (This is probably
4969 a mistake.) */
4971 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4972 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4973 / BITS_PER_UNIT);
4974 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4975 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4976 unsigned bytele = (subword_byte % UNITS_PER_WORD
4977 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4978 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4981 switch (outer_class)
4983 case MODE_INT:
4984 case MODE_PARTIAL_INT:
4986 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4988 for (i = 0;
4989 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4990 i += value_bit)
4991 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4992 for (; i < elem_bitsize; i += value_bit)
4993 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4994 << (i - HOST_BITS_PER_WIDE_INT));
4996 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4997 know why. */
4998 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4999 elems[elem] = gen_int_mode (lo, outer_submode);
5000 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5001 elems[elem] = immed_double_const (lo, hi, outer_submode);
5002 else
5003 return NULL_RTX;
5005 break;
5007 case MODE_FLOAT:
5008 case MODE_DECIMAL_FLOAT:
5010 REAL_VALUE_TYPE r;
5011 long tmp[max_bitsize / 32];
5013 /* real_from_target wants its input in words affected by
5014 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5015 and use WORDS_BIG_ENDIAN instead; see the documentation
5016 of SUBREG in rtl.texi. */
5017 for (i = 0; i < max_bitsize / 32; i++)
5018 tmp[i] = 0;
5019 for (i = 0; i < elem_bitsize; i += value_bit)
5021 int ibase;
5022 if (WORDS_BIG_ENDIAN)
5023 ibase = elem_bitsize - 1 - i;
5024 else
5025 ibase = i;
5026 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5029 real_from_target (&r, tmp, outer_submode);
5030 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5032 break;
5034 case MODE_FRACT:
5035 case MODE_UFRACT:
5036 case MODE_ACCUM:
5037 case MODE_UACCUM:
5039 FIXED_VALUE_TYPE f;
5040 f.data.low = 0;
5041 f.data.high = 0;
5042 f.mode = outer_submode;
5044 for (i = 0;
5045 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5046 i += value_bit)
5047 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5048 for (; i < elem_bitsize; i += value_bit)
5049 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5050 << (i - HOST_BITS_PER_WIDE_INT));
5052 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5054 break;
5056 default:
5057 gcc_unreachable ();
5060 if (VECTOR_MODE_P (outermode))
5061 return gen_rtx_CONST_VECTOR (outermode, result_v);
5062 else
5063 return result_s;
5066 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5067 Return 0 if no simplifications are possible. */
5069 simplify_subreg (enum machine_mode outermode, rtx op,
5070 enum machine_mode innermode, unsigned int byte)
5072 /* Little bit of sanity checking. */
5073 gcc_assert (innermode != VOIDmode);
5074 gcc_assert (outermode != VOIDmode);
5075 gcc_assert (innermode != BLKmode);
5076 gcc_assert (outermode != BLKmode);
5078 gcc_assert (GET_MODE (op) == innermode
5079 || GET_MODE (op) == VOIDmode);
5081 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5082 gcc_assert (byte < GET_MODE_SIZE (innermode));
5084 if (outermode == innermode && !byte)
5085 return op;
5087 if (CONST_INT_P (op)
5088 || GET_CODE (op) == CONST_DOUBLE
5089 || GET_CODE (op) == CONST_FIXED
5090 || GET_CODE (op) == CONST_VECTOR)
5091 return simplify_immed_subreg (outermode, op, innermode, byte);
5093 /* Changing mode twice with SUBREG => just change it once,
5094 or not at all if changing back to op's starting mode. */
5095 if (GET_CODE (op) == SUBREG)
5097 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5098 int final_offset = byte + SUBREG_BYTE (op);
5099 rtx newx;
5101 if (outermode == innermostmode
5102 && byte == 0 && SUBREG_BYTE (op) == 0)
5103 return SUBREG_REG (op);
5105 /* The SUBREG_BYTE represents the offset, as if the value were stored
5106 in memory. An irritating exception is the paradoxical subreg, where
5107 we define SUBREG_BYTE to be 0; on big endian machines, this
5108 value should really be negative. For a moment, undo this exception. */
5109 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5111 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5112 if (WORDS_BIG_ENDIAN)
5113 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5114 if (BYTES_BIG_ENDIAN)
5115 final_offset += difference % UNITS_PER_WORD;
5117 if (SUBREG_BYTE (op) == 0
5118 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5120 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5121 if (WORDS_BIG_ENDIAN)
5122 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5123 if (BYTES_BIG_ENDIAN)
5124 final_offset += difference % UNITS_PER_WORD;
5128 /* See whether the resulting subreg will be paradoxical. */
5128 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5130 /* In nonparadoxical subregs we can't handle negative offsets. */
5131 if (final_offset < 0)
5132 return NULL_RTX;
5133 /* Bail out in case resulting subreg would be incorrect. */
5134 if (final_offset % GET_MODE_SIZE (outermode)
5135 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5136 return NULL_RTX;
5138 else
5140 int offset = 0;
5141 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5143 /* In a paradoxical subreg, see if we are still looking at the lower part.
5144 If so, our SUBREG_BYTE will be 0. */
5145 if (WORDS_BIG_ENDIAN)
5146 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5147 if (BYTES_BIG_ENDIAN)
5148 offset += difference % UNITS_PER_WORD;
5149 if (offset == final_offset)
5150 final_offset = 0;
5151 else
5152 return NULL_RTX;
5155 /* Recurse for further possible simplifications. */
5156 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5157 final_offset);
5158 if (newx)
5159 return newx;
5160 if (validate_subreg (outermode, innermostmode,
5161 SUBREG_REG (op), final_offset))
5163 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5164 if (SUBREG_PROMOTED_VAR_P (op)
5165 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5166 && GET_MODE_CLASS (outermode) == MODE_INT
5167 && IN_RANGE (GET_MODE_SIZE (outermode),
5168 GET_MODE_SIZE (innermode),
5169 GET_MODE_SIZE (innermostmode))
5170 && subreg_lowpart_p (newx))
5172 SUBREG_PROMOTED_VAR_P (newx) = 1;
5173 SUBREG_PROMOTED_UNSIGNED_SET
5174 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5176 return newx;
5178 return NULL_RTX;
5181 /* Merge implicit and explicit truncations. */
5183 if (GET_CODE (op) == TRUNCATE
5184 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5185 && subreg_lowpart_offset (outermode, innermode) == byte)
5186 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5187 GET_MODE (XEXP (op, 0)));
5189 /* SUBREG of a hard register => just change the register number
5190 and/or mode. If the hard register is not valid in that mode,
5191 suppress this simplification. If the hard register is the stack,
5192 frame, or argument pointer, leave this as a SUBREG. */
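/* For instance, on a 32-bit little-endian target where a DImode value
   lives in the register pair {r0, r1}, (subreg:SI (reg:DI r0) 4) can
   become (reg:SI r1). */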
5194 if (REG_P (op) && HARD_REGISTER_P (op))
5196 unsigned int regno, final_regno;
5198 regno = REGNO (op);
5199 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5200 if (HARD_REGISTER_NUM_P (final_regno))
5202 rtx x;
5203 int final_offset = byte;
5205 /* Adjust offset for paradoxical subregs. */
5206 if (byte == 0
5207 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5209 int difference = (GET_MODE_SIZE (innermode)
5210 - GET_MODE_SIZE (outermode));
5211 if (WORDS_BIG_ENDIAN)
5212 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5213 if (BYTES_BIG_ENDIAN)
5214 final_offset += difference % UNITS_PER_WORD;
5217 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5219 /* Propagate the original regno. We don't have any way to specify
5220 an offset inside the original regno, so do so only for the lowpart.
5221 The information is used only by alias analysis, which cannot
5222 grok partial registers anyway. */
5224 if (subreg_lowpart_offset (outermode, innermode) == byte)
5225 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5226 return x;
5230 /* If we have a SUBREG of a register that we are replacing and we are
5231 replacing it with a MEM, make a new MEM and try replacing the
5232 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5233 or if we would be widening it. */
5235 if (MEM_P (op)
5236 && ! mode_dependent_address_p (XEXP (op, 0))
5237 /* Allow splitting of volatile memory references in case we don't
5238 have an instruction to move the whole thing. */
5239 && (! MEM_VOLATILE_P (op)
5240 || ! have_insn_for (SET, innermode))
5241 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5242 return adjust_address_nv (op, outermode, byte);
5244 /* Handle complex values represented as CONCAT
5245 of real and imaginary part. */
5246 if (GET_CODE (op) == CONCAT)
5248 unsigned int part_size, final_offset;
5249 rtx part, res;
5251 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5252 if (byte < part_size)
5254 part = XEXP (op, 0);
5255 final_offset = byte;
5257 else
5259 part = XEXP (op, 1);
5260 final_offset = byte - part_size;
5263 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5264 return NULL_RTX;
5266 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5267 if (res)
5268 return res;
5269 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5270 return gen_rtx_SUBREG (outermode, part, final_offset);
5271 return NULL_RTX;
5274 /* Optimize SUBREG truncations of zero and sign extended values. */
5275 if ((GET_CODE (op) == ZERO_EXTEND
5276 || GET_CODE (op) == SIGN_EXTEND)
5277 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5279 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5281 /* If we're requesting the lowpart of a zero or sign extension,
5282 there are three possibilities. If the outermode is the same
5283 as the origmode, we can omit both the extension and the subreg.
5284 If the outermode is not larger than the origmode, we can apply
5285 the truncation without the extension. Finally, if the outermode
5286 is larger than the origmode, but both are integer modes, we
5287 can just extend to the appropriate mode. */
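/* E.g. on a little-endian target, (subreg:HI (zero_extend:SI (reg:HI x)) 0)
   is just (reg:HI x), while (subreg:SI (zero_extend:DI (reg:HI x)) 0)
   becomes (zero_extend:SI (reg:HI x)). */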
5288 if (bitpos == 0)
5290 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5291 if (outermode == origmode)
5292 return XEXP (op, 0);
5293 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5294 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5295 subreg_lowpart_offset (outermode,
5296 origmode));
5297 if (SCALAR_INT_MODE_P (outermode))
5298 return simplify_gen_unary (GET_CODE (op), outermode,
5299 XEXP (op, 0), origmode);
5302 /* A SUBREG resulting from a zero extension may fold to zero if
5303 it extracts higher bits than the ZERO_EXTEND's source bits. */
5304 if (GET_CODE (op) == ZERO_EXTEND
5305 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5306 return CONST0_RTX (outermode);
5309 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5310 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5311 the outer subreg is effectively a truncation to the original mode. */
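/* E.g. on a little-endian target,
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
   becomes (ashiftrt:QI (reg:QI x) (const_int 2)). */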
5312 if ((GET_CODE (op) == LSHIFTRT
5313 || GET_CODE (op) == ASHIFTRT)
5314 && SCALAR_INT_MODE_P (outermode)
5315 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5316 to avoid the possibility that an outer LSHIFTRT shifts by more
5317 than the sign extension's sign_bit_copies and introduces zeros
5318 into the high bits of the result. */
5319 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5320 && CONST_INT_P (XEXP (op, 1))
5321 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5322 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5323 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5324 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5325 return simplify_gen_binary (ASHIFTRT, outermode,
5326 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5328 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5329 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5330 the outer subreg is effectively a truncation to the original mode. */
5331 if ((GET_CODE (op) == LSHIFTRT
5332 || GET_CODE (op) == ASHIFTRT)
5333 && SCALAR_INT_MODE_P (outermode)
5334 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5335 && CONST_INT_P (XEXP (op, 1))
5336 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5337 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5338 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5339 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5340 return simplify_gen_binary (LSHIFTRT, outermode,
5341 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5343 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5344 (ashift:QI (x:QI) C), where C is a suitable small constant and
5345 the outer subreg is effectively a truncation to the original mode. */
5346 if (GET_CODE (op) == ASHIFT
5347 && SCALAR_INT_MODE_P (outermode)
5348 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5349 && CONST_INT_P (XEXP (op, 1))
5350 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5351 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5352 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5353 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5354 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5355 return simplify_gen_binary (ASHIFT, outermode,
5356 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5358 /* Recognize a word extraction from a multi-word subreg. */
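/* E.g. on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) becomes
   (subreg:SI (reg:DI x) 4), i.e. the high word of x. */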
5359 if ((GET_CODE (op) == LSHIFTRT
5360 || GET_CODE (op) == ASHIFTRT)
5361 && SCALAR_INT_MODE_P (outermode)
5362 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5363 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5364 && CONST_INT_P (XEXP (op, 1))
5365 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5366 && INTVAL (XEXP (op, 1)) >= 0
5367 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5368 && byte == subreg_lowpart_offset (outermode, innermode))
5370 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5371 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5372 (WORDS_BIG_ENDIAN
5373 ? byte - shifted_bytes
5374 : byte + shifted_bytes));
5377 return NULL_RTX;
5380 /* Make a SUBREG operation or equivalent if it folds. */
5383 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5384 enum machine_mode innermode, unsigned int byte)
5386 rtx newx;
5388 newx = simplify_subreg (outermode, op, innermode, byte);
5389 if (newx)
5390 return newx;
5392 if (GET_CODE (op) == SUBREG
5393 || GET_CODE (op) == CONCAT
5394 || GET_MODE (op) == VOIDmode)
5395 return NULL_RTX;
5397 if (validate_subreg (outermode, innermode, op, byte))
5398 return gen_rtx_SUBREG (outermode, op, byte);
5400 return NULL_RTX;
5403 /* Simplify X, an rtx expression.
5405 Return the simplified expression or NULL if no simplifications
5406 were possible.
5408 This is the preferred entry point into the simplification routines;
5409 however, we still allow passes to call the more specific routines.
5411 Right now GCC has three (yes, three) major bodies of RTL simplification
5412 code that need to be unified.
5414 1. fold_rtx in cse.c. This code uses various CSE specific
5415 information to aid in RTL simplification.
5417 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5418 it uses combine specific information to aid in RTL
5419 simplification.
5421 3. The routines in this file.
5424 Long term we want to only have one body of simplification code; to
5425 get to that state I recommend the following steps:
5427 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5428 which do not depend on pass-specific state into these routines.
5430 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5431 use this routine whenever possible.
5433 3. Allow for pass dependent state to be provided to these
5434 routines and add simplifications based on the pass dependent
5435 state. Remove code from cse.c & combine.c that becomes
5436 redundant/dead.
5438 It will take time, but ultimately the compiler will be easier to
5439 maintain and improve. It's totally silly that when we add a
5440 simplification it needs to be added to 4 places (3 for RTL
5441 simplification and 1 for tree simplification). */
5444 simplify_rtx (const_rtx x)
5446 const enum rtx_code code = GET_CODE (x);
5447 const enum machine_mode mode = GET_MODE (x);
5449 switch (GET_RTX_CLASS (code))
5451 case RTX_UNARY:
5452 return simplify_unary_operation (code, mode,
5453 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5454 case RTX_COMM_ARITH:
5455 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5456 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5458 /* Fall through.... */
5460 case RTX_BIN_ARITH:
5461 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5463 case RTX_TERNARY:
5464 case RTX_BITFIELD_OPS:
5465 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5466 XEXP (x, 0), XEXP (x, 1),
5467 XEXP (x, 2));
5469 case RTX_COMPARE:
5470 case RTX_COMM_COMPARE:
5471 return simplify_relational_operation (code, mode,
5472 ((GET_MODE (XEXP (x, 0))
5473 != VOIDmode)
5474 ? GET_MODE (XEXP (x, 0))
5475 : GET_MODE (XEXP (x, 1))),
5476 XEXP (x, 0),
5477 XEXP (x, 1));
5479 case RTX_EXTRA:
5480 if (code == SUBREG)
5481 return simplify_subreg (mode, SUBREG_REG (x),
5482 GET_MODE (SUBREG_REG (x)),
5483 SUBREG_BYTE (x));
5484 break;
5486 case RTX_OBJ:
5487 if (code == LO_SUM)
5489 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5490 if (GET_CODE (XEXP (x, 0)) == HIGH
5491 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5492 return XEXP (x, 1);
5494 break;
5496 default:
5497 break;
5499 return NULL;