gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
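/* For instance, assuming a 64-bit HOST_WIDE_INT: the 128-bit value -5
   is represented as (low = 0xfffffffffffffffb, high = -1), and
   HWI_SIGN_EXTEND applied to that low word reproduces the -1 high word,
   while HWI_SIGN_EXTEND (5) yields 0.  */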
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
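/* Illustration of the truncation case mentioned above: in SImode the
   maximally negative value (const_int -2147483648) negates back to
   itself once gen_int_mode wraps the out-of-range result; every other
   value simply changes sign.  */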
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
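/* Example: for 32-bit SImode the only accepted value is 0x80000000,
   i.e. a constant whose sole set bit is bit 31; any other constant, or
   a non-integer mode, makes this return false.  */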
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
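/* A sketch of typical use, with a hypothetical pseudo register reg_a:

     rtx x = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg_a);

   If no folding applies, the commutative swap above still canonicalizes
   the result to (plus:SI reg_a (const_int 4)).  */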
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
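/* For example, a (mem/u:SF (symbol_ref ("*.LC0"))) whose constant-pool
   slot holds the SFmode constant 1.0 comes back as that CONST_DOUBLE
   (assuming matching mode and zero offset), so callers can fold through
   it; anything else is returned untouched.  */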
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
210 delegitimize_mem_from_attrs (rtx x)
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
271 newx = DECL_RTL (decl);
273 if (MEM_P (newx))
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
302 return x;
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
312 rtx tem;
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
318 return gen_rtx_fmt_e (code, mode, op);
321 /* Likewise for ternary operations. */
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
327 rtx tem;
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
344 rtx tem;
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
353 /* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
354 is an expression in X that is equal to OLD_RTX. Canonicalize and
355 simplify the result.
357 If FN is null, assume FN (X', DATA) == copy_rtx (DATA). */
360 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
361 rtx (*fn) (rtx, void *), void *data)
363 enum rtx_code code = GET_CODE (x);
364 enum machine_mode mode = GET_MODE (x);
365 enum machine_mode op_mode;
366 const char *fmt;
367 rtx op0, op1, op2, newx, op;
368 rtvec vec, newvec;
369 int i, j;
371 /* If X is OLD_RTX, return FN (X, DATA), with a null FN. Otherwise,
372 if this is an expression, try to build a new expression, substituting
373 recursively. If we can't do anything, return our input. */
375 if (rtx_equal_p (x, old_rtx))
377 if (fn)
378 return fn (x, data);
379 else
380 return copy_rtx ((rtx) data);
383 switch (GET_RTX_CLASS (code))
385 case RTX_UNARY:
386 op0 = XEXP (x, 0);
387 op_mode = GET_MODE (op0);
388 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
389 if (op0 == XEXP (x, 0))
390 return x;
391 return simplify_gen_unary (code, mode, op0, op_mode);
393 case RTX_BIN_ARITH:
394 case RTX_COMM_ARITH:
395 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
396 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
397 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
398 return x;
399 return simplify_gen_binary (code, mode, op0, op1);
401 case RTX_COMPARE:
402 case RTX_COMM_COMPARE:
403 op0 = XEXP (x, 0);
404 op1 = XEXP (x, 1);
405 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
406 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
407 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
408 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
409 return x;
410 return simplify_gen_relational (code, mode, op_mode, op0, op1);
412 case RTX_TERNARY:
413 case RTX_BITFIELD_OPS:
414 op0 = XEXP (x, 0);
415 op_mode = GET_MODE (op0);
416 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
417 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
418 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
419 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
420 return x;
421 if (op_mode == VOIDmode)
422 op_mode = GET_MODE (op0);
423 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
425 case RTX_EXTRA:
426 if (code == SUBREG)
428 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
429 if (op0 == SUBREG_REG (x))
430 return x;
431 op0 = simplify_gen_subreg (GET_MODE (x), op0,
432 GET_MODE (SUBREG_REG (x)),
433 SUBREG_BYTE (x));
434 return op0 ? op0 : x;
436 break;
438 case RTX_OBJ:
439 if (code == MEM)
441 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
442 if (op0 == XEXP (x, 0))
443 return x;
444 return replace_equiv_address_nv (x, op0);
446 else if (code == LO_SUM)
448 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
449 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
451 /* (lo_sum (high x) x) -> x */
452 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
453 return op1;
455 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
456 return x;
457 return gen_rtx_LO_SUM (mode, op0, op1);
459 break;
461 default:
462 break;
465 newx = x;
466 fmt = GET_RTX_FORMAT (code);
467 for (i = 0; fmt[i]; i++)
468 switch (fmt[i])
470 case 'E':
471 vec = XVEC (x, i);
472 newvec = XVEC (newx, i);
473 for (j = 0; j < GET_NUM_ELEM (vec); j++)
475 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
476 old_rtx, fn, data);
477 if (op != RTVEC_ELT (vec, j))
479 if (newvec == vec)
481 newvec = shallow_copy_rtvec (vec);
482 if (x == newx)
483 newx = shallow_copy_rtx (x);
484 XVEC (newx, i) = newvec;
486 RTVEC_ELT (newvec, j) = op;
489 break;
491 case 'e':
492 if (XEXP (x, i))
494 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
495 if (op != XEXP (x, i))
497 if (x == newx)
498 newx = shallow_copy_rtx (x);
499 XEXP (newx, i) = op;
502 break;
504 return newx;
507 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
508 resulting RTX. Return a new RTX which is as simplified as possible. */
511 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
513 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
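/* Typical use, with hypothetical pseudo registers reg_a and reg_b:
   given x = (plus:SI reg_a reg_b),

     rtx y = simplify_replace_rtx (x, reg_a, const0_rtx);

   returns reg_b, because the rebuilt PLUS is re-simplified on the way
   back up.  */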
516 /* Try to simplify a unary operation CODE whose output mode is to be
517 MODE with input operand OP whose mode was originally OP_MODE.
518 Return zero if no simplification can be made. */
520 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
521 rtx op, enum machine_mode op_mode)
523 rtx trueop, tem;
525 trueop = avoid_constant_pool_reference (op);
527 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
528 if (tem)
529 return tem;
531 return simplify_unary_operation_1 (code, mode, op);
534 /* Perform some simplifications we can do even if the operands
535 aren't constant. */
536 static rtx
537 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
539 enum rtx_code reversed;
540 rtx temp;
542 switch (code)
544 case NOT:
545 /* (not (not X)) == X. */
546 if (GET_CODE (op) == NOT)
547 return XEXP (op, 0);
549 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
550 comparison is all ones. */
551 if (COMPARISON_P (op)
552 && (mode == BImode || STORE_FLAG_VALUE == -1)
553 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
554 return simplify_gen_relational (reversed, mode, VOIDmode,
555 XEXP (op, 0), XEXP (op, 1));
557 /* (not (plus X -1)) can become (neg X). */
558 if (GET_CODE (op) == PLUS
559 && XEXP (op, 1) == constm1_rtx)
560 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
562 /* Similarly, (not (neg X)) is (plus X -1). */
563 if (GET_CODE (op) == NEG)
564 return plus_constant (XEXP (op, 0), -1);
566 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
567 if (GET_CODE (op) == XOR
568 && CONST_INT_P (XEXP (op, 1))
569 && (temp = simplify_unary_operation (NOT, mode,
570 XEXP (op, 1), mode)) != 0)
571 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
573 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
574 if (GET_CODE (op) == PLUS
575 && CONST_INT_P (XEXP (op, 1))
576 && mode_signbit_p (mode, XEXP (op, 1))
577 && (temp = simplify_unary_operation (NOT, mode,
578 XEXP (op, 1), mode)) != 0)
579 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
582 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
583 operands other than 1, but that is not valid. We could do a
584 similar simplification for (not (lshiftrt C X)) where C is
585 just the sign bit, but this doesn't seem common enough to
586 bother with. */
587 if (GET_CODE (op) == ASHIFT
588 && XEXP (op, 0) == const1_rtx)
590 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
591 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
594 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
595 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
596 so we can perform the above simplification. */
598 if (STORE_FLAG_VALUE == -1
599 && GET_CODE (op) == ASHIFTRT
600 && CONST_INT_P (XEXP (op, 1))
601 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
602 return simplify_gen_relational (GE, mode, VOIDmode,
603 XEXP (op, 0), const0_rtx);
606 if (GET_CODE (op) == SUBREG
607 && subreg_lowpart_p (op)
608 && (GET_MODE_SIZE (GET_MODE (op))
609 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
610 && GET_CODE (SUBREG_REG (op)) == ASHIFT
611 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
613 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
614 rtx x;
616 x = gen_rtx_ROTATE (inner_mode,
617 simplify_gen_unary (NOT, inner_mode, const1_rtx,
618 inner_mode),
619 XEXP (SUBREG_REG (op), 1));
620 return rtl_hooks.gen_lowpart_no_emit (mode, x);
623 /* Apply De Morgan's laws to reduce number of patterns for machines
624 with negating logical insns (and-not, nand, etc.). If result has
625 only one NOT, put it first, since that is how the patterns are
626 coded. */
628 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
630 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
631 enum machine_mode op_mode;
633 op_mode = GET_MODE (in1);
634 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
636 op_mode = GET_MODE (in2);
637 if (op_mode == VOIDmode)
638 op_mode = mode;
639 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
641 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
643 rtx tem = in2;
644 in2 = in1; in1 = tem;
647 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
648 mode, in1, in2);
650 break;
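/* Worked example of the De Morgan step above: (not:SI (and:SI reg_a
   reg_b)) becomes (ior:SI (not:SI reg_a) (not:SI reg_b)), and a NOT of
   an IOR becomes an AND of the negations; when only one operand stays
   negated, it is swapped into the first position to match the
   patterns.  */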
652 case NEG:
653 /* (neg (neg X)) == X. */
654 if (GET_CODE (op) == NEG)
655 return XEXP (op, 0);
657 /* (neg (plus X 1)) can become (not X). */
658 if (GET_CODE (op) == PLUS
659 && XEXP (op, 1) == const1_rtx)
660 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
662 /* Similarly, (neg (not X)) is (plus X 1). */
663 if (GET_CODE (op) == NOT)
664 return plus_constant (XEXP (op, 0), 1);
666 /* (neg (minus X Y)) can become (minus Y X). This transformation
667 isn't safe for modes with signed zeros, since if X and Y are
668 both +0, (minus Y X) is the same as (minus X Y). If the
669 rounding mode is towards +infinity (or -infinity) then the two
670 expressions will be rounded differently. */
671 if (GET_CODE (op) == MINUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
674 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
676 if (GET_CODE (op) == PLUS
677 && !HONOR_SIGNED_ZEROS (mode)
678 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
680 /* (neg (plus A C)) is simplified to (minus -C A). */
681 if (CONST_INT_P (XEXP (op, 1))
682 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
684 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
685 if (temp)
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
689 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
690 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
691 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
694 /* (neg (mult A B)) becomes (mult (neg A) B).
695 This works even for floating-point values. */
696 if (GET_CODE (op) == MULT
697 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
699 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
700 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
703 /* NEG commutes with ASHIFT since it is multiplication. Only do
704 this if we can then eliminate the NEG (e.g., if the operand
705 is a constant). */
706 if (GET_CODE (op) == ASHIFT)
708 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
709 if (temp)
710 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
713 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
714 C is equal to the width of MODE minus 1. */
715 if (GET_CODE (op) == ASHIFTRT
716 && CONST_INT_P (XEXP (op, 1))
717 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
718 return simplify_gen_binary (LSHIFTRT, mode,
719 XEXP (op, 0), XEXP (op, 1));
721 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
722 C is equal to the width of MODE minus 1. */
723 if (GET_CODE (op) == LSHIFTRT
724 && CONST_INT_P (XEXP (op, 1))
725 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
726 return simplify_gen_binary (ASHIFTRT, mode,
727 XEXP (op, 0), XEXP (op, 1));
729 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
730 if (GET_CODE (op) == XOR
731 && XEXP (op, 1) == const1_rtx
732 && nonzero_bits (XEXP (op, 0), mode) == 1)
733 return plus_constant (XEXP (op, 0), -1);
735 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
736 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
737 if (GET_CODE (op) == LT
738 && XEXP (op, 1) == const0_rtx
739 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
741 enum machine_mode inner = GET_MODE (XEXP (op, 0));
742 int isize = GET_MODE_BITSIZE (inner);
743 if (STORE_FLAG_VALUE == 1)
745 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
746 GEN_INT (isize - 1));
747 if (mode == inner)
748 return temp;
749 if (GET_MODE_BITSIZE (mode) > isize)
750 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
751 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
753 else if (STORE_FLAG_VALUE == -1)
755 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
756 GEN_INT (isize - 1));
757 if (mode == inner)
758 return temp;
759 if (GET_MODE_BITSIZE (mode) > isize)
760 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
761 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
764 break;
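/* Worked example, assuming STORE_FLAG_VALUE == 1:
   (neg:SI (lt:SI reg_a (const_int 0))) becomes
   (ashiftrt:SI reg_a (const_int 31)), which is -1 when reg_a is
   negative and 0 otherwise, exactly the negated comparison result.  */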
766 case TRUNCATE:
767 /* We can't handle truncation to a partial integer mode here
768 because we don't know the real bitsize of the partial
769 integer mode. */
770 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
771 break;
773 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
774 if ((GET_CODE (op) == SIGN_EXTEND
775 || GET_CODE (op) == ZERO_EXTEND)
776 && GET_MODE (XEXP (op, 0)) == mode)
777 return XEXP (op, 0);
779 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
780 (OP:SI foo:SI) if OP is NEG or ABS. */
781 if ((GET_CODE (op) == ABS
782 || GET_CODE (op) == NEG)
783 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
784 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
785 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
786 return simplify_gen_unary (GET_CODE (op), mode,
787 XEXP (XEXP (op, 0), 0), mode);
789 /* (truncate:A (subreg:B (truncate:C X) 0)) is
790 (truncate:A X). */
791 if (GET_CODE (op) == SUBREG
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
794 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
795 GET_MODE (XEXP (SUBREG_REG (op), 0)));
797 /* If we know that the value is already truncated, we can
798 replace the TRUNCATE with a SUBREG. Note that this is also
799 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
800 modes; we just have to apply a different definition for
801 truncation. But don't do this for an (LSHIFTRT (MULT ...))
802 since this will cause problems with the umulXi3_highpart
803 patterns. */
804 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
805 GET_MODE_BITSIZE (GET_MODE (op)))
806 ? (num_sign_bit_copies (op, GET_MODE (op))
807 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
808 - GET_MODE_BITSIZE (mode)))
809 : truncated_to_mode (mode, op))
810 && ! (GET_CODE (op) == LSHIFTRT
811 && GET_CODE (XEXP (op, 0)) == MULT))
812 return rtl_hooks.gen_lowpart_no_emit (mode, op);
814 /* A truncate of a comparison can be replaced with a subreg if
815 STORE_FLAG_VALUE permits. This is like the previous test,
816 but it works even if the comparison is done in a mode larger
817 than HOST_BITS_PER_WIDE_INT. */
818 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
819 && COMPARISON_P (op)
820 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
821 return rtl_hooks.gen_lowpart_no_emit (mode, op);
822 break;
824 case FLOAT_TRUNCATE:
825 if (DECIMAL_FLOAT_MODE_P (mode))
826 break;
828 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
829 if (GET_CODE (op) == FLOAT_EXTEND
830 && GET_MODE (XEXP (op, 0)) == mode)
831 return XEXP (op, 0);
833 /* (float_truncate:SF (float_truncate:DF foo:XF))
834 = (float_truncate:SF foo:XF).
835 This may eliminate double rounding, so it is unsafe.
837 (float_truncate:SF (float_extend:XF foo:DF))
838 = (float_truncate:SF foo:DF).
840 (float_truncate:DF (float_extend:XF foo:SF))
841 = (float_extend:DF foo:SF). */
842 if ((GET_CODE (op) == FLOAT_TRUNCATE
843 && flag_unsafe_math_optimizations)
844 || GET_CODE (op) == FLOAT_EXTEND)
845 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
846 0)))
847 > GET_MODE_SIZE (mode)
848 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
849 mode,
850 XEXP (op, 0), mode);
852 /* (float_truncate (float x)) is (float x) */
853 if (GET_CODE (op) == FLOAT
854 && (flag_unsafe_math_optimizations
855 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
856 && ((unsigned)significand_size (GET_MODE (op))
857 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
858 - num_sign_bit_copies (XEXP (op, 0),
859 GET_MODE (XEXP (op, 0))))))))
860 return simplify_gen_unary (FLOAT, mode,
861 XEXP (op, 0),
862 GET_MODE (XEXP (op, 0)));
864 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
865 (OP:SF foo:SF) if OP is NEG or ABS. */
866 if ((GET_CODE (op) == ABS
867 || GET_CODE (op) == NEG)
868 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
869 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
870 return simplify_gen_unary (GET_CODE (op), mode,
871 XEXP (XEXP (op, 0), 0), mode);
873 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
874 is (float_truncate:SF x). */
875 if (GET_CODE (op) == SUBREG
876 && subreg_lowpart_p (op)
877 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
878 return SUBREG_REG (op);
879 break;
881 case FLOAT_EXTEND:
882 if (DECIMAL_FLOAT_MODE_P (mode))
883 break;
885 /* (float_extend (float_extend x)) is (float_extend x)
887 (float_extend (float x)) is (float x) assuming that double
888 rounding can't happen.  */
890 if (GET_CODE (op) == FLOAT_EXTEND
891 || (GET_CODE (op) == FLOAT
892 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
893 && ((unsigned)significand_size (GET_MODE (op))
894 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
895 - num_sign_bit_copies (XEXP (op, 0),
896 GET_MODE (XEXP (op, 0)))))))
897 return simplify_gen_unary (GET_CODE (op), mode,
898 XEXP (op, 0),
899 GET_MODE (XEXP (op, 0)));
901 break;
903 case ABS:
904 /* (abs (neg <foo>)) -> (abs <foo>) */
905 if (GET_CODE (op) == NEG)
906 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
907 GET_MODE (XEXP (op, 0)));
909 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
910 do nothing. */
911 if (GET_MODE (op) == VOIDmode)
912 break;
914 /* If operand is something known to be positive, ignore the ABS. */
915 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
916 || ((GET_MODE_BITSIZE (GET_MODE (op))
917 <= HOST_BITS_PER_WIDE_INT)
918 && ((nonzero_bits (op, GET_MODE (op))
919 & ((HOST_WIDE_INT) 1
920 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
921 == 0)))
922 return op;
924 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
925 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
926 return gen_rtx_NEG (mode, op);
928 break;
930 case FFS:
931 /* (ffs (*_extend <X>)) = (ffs <X>) */
932 if (GET_CODE (op) == SIGN_EXTEND
933 || GET_CODE (op) == ZERO_EXTEND)
934 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
935 GET_MODE (XEXP (op, 0)));
936 break;
938 case POPCOUNT:
939 switch (GET_CODE (op))
941 case BSWAP:
942 case ZERO_EXTEND:
943 /* (popcount (zero_extend <X>)) = (popcount <X>) */
944 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
945 GET_MODE (XEXP (op, 0)));
947 case ROTATE:
948 case ROTATERT:
949 /* Rotations don't affect popcount. */
950 if (!side_effects_p (XEXP (op, 1)))
951 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
952 GET_MODE (XEXP (op, 0)));
953 break;
955 default:
956 break;
958 break;
960 case PARITY:
961 switch (GET_CODE (op))
963 case NOT:
964 case BSWAP:
965 case ZERO_EXTEND:
966 case SIGN_EXTEND:
967 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
968 GET_MODE (XEXP (op, 0)));
970 case ROTATE:
971 case ROTATERT:
972 /* Rotations don't affect parity. */
973 if (!side_effects_p (XEXP (op, 1)))
974 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
975 GET_MODE (XEXP (op, 0)));
976 break;
978 default:
979 break;
981 break;
983 case BSWAP:
984 /* (bswap (bswap x)) -> x. */
985 if (GET_CODE (op) == BSWAP)
986 return XEXP (op, 0);
987 break;
989 case FLOAT:
990 /* (float (sign_extend <X>)) = (float <X>). */
991 if (GET_CODE (op) == SIGN_EXTEND)
992 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
993 GET_MODE (XEXP (op, 0)));
994 break;
996 case SIGN_EXTEND:
997 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
998 becomes just the MINUS if its mode is MODE. This allows
999 folding switch statements on machines using casesi (such as
1000 the VAX). */
1001 if (GET_CODE (op) == TRUNCATE
1002 && GET_MODE (XEXP (op, 0)) == mode
1003 && GET_CODE (XEXP (op, 0)) == MINUS
1004 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1005 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1006 return XEXP (op, 0);
1008 /* Check for a sign extension of a subreg of a promoted
1009 variable, where the promotion is sign-extended, and the
1010 target mode is the same as the variable's promotion. */
1011 if (GET_CODE (op) == SUBREG
1012 && SUBREG_PROMOTED_VAR_P (op)
1013 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1014 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1015 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1017 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1018 /* As we do not know which address space the pointer is referring to,
1019 we can do this only if the target does not support different pointer
1020 or address modes depending on the address space. */
1021 if (target_default_pointer_address_modes_p ()
1022 && ! POINTERS_EXTEND_UNSIGNED
1023 && mode == Pmode && GET_MODE (op) == ptr_mode
1024 && (CONSTANT_P (op)
1025 || (GET_CODE (op) == SUBREG
1026 && REG_P (SUBREG_REG (op))
1027 && REG_POINTER (SUBREG_REG (op))
1028 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1029 return convert_memory_address (Pmode, op);
1030 #endif
1031 break;
1033 case ZERO_EXTEND:
1034 /* Check for a zero extension of a subreg of a promoted
1035 variable, where the promotion is zero-extended, and the
1036 target mode is the same as the variable's promotion. */
1037 if (GET_CODE (op) == SUBREG
1038 && SUBREG_PROMOTED_VAR_P (op)
1039 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1040 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1041 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1043 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1044 /* As we do not know which address space the pointer is referring to,
1045 we can do this only if the target does not support different pointer
1046 or address modes depending on the address space. */
1047 if (target_default_pointer_address_modes_p ()
1048 && POINTERS_EXTEND_UNSIGNED > 0
1049 && mode == Pmode && GET_MODE (op) == ptr_mode
1050 && (CONSTANT_P (op)
1051 || (GET_CODE (op) == SUBREG
1052 && REG_P (SUBREG_REG (op))
1053 && REG_POINTER (SUBREG_REG (op))
1054 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1055 return convert_memory_address (Pmode, op);
1056 #endif
1057 break;
1059 default:
1060 break;
1063 return 0;
1066 /* Try to compute the value of a unary operation CODE whose output mode is to
1067 be MODE with input operand OP whose mode was originally OP_MODE.
1068 Return zero if the value cannot be computed. */
1070 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1071 rtx op, enum machine_mode op_mode)
1073 unsigned int width = GET_MODE_BITSIZE (mode);
1075 if (code == VEC_DUPLICATE)
1077 gcc_assert (VECTOR_MODE_P (mode));
1078 if (GET_MODE (op) != VOIDmode)
1080 if (!VECTOR_MODE_P (GET_MODE (op)))
1081 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1082 else
1083 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1084 (GET_MODE (op)));
1086 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1087 || GET_CODE (op) == CONST_VECTOR)
1089 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1090 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1091 rtvec v = rtvec_alloc (n_elts);
1092 unsigned int i;
1094 if (GET_CODE (op) != CONST_VECTOR)
1095 for (i = 0; i < n_elts; i++)
1096 RTVEC_ELT (v, i) = op;
1097 else
1099 enum machine_mode inmode = GET_MODE (op);
1100 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1101 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1103 gcc_assert (in_n_elts < n_elts);
1104 gcc_assert ((n_elts % in_n_elts) == 0);
1105 for (i = 0; i < n_elts; i++)
1106 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1108 return gen_rtx_CONST_VECTOR (mode, v);
1112 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1114 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1115 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1116 enum machine_mode opmode = GET_MODE (op);
1117 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1118 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1119 rtvec v = rtvec_alloc (n_elts);
1120 unsigned int i;
1122 gcc_assert (op_n_elts == n_elts);
1123 for (i = 0; i < n_elts; i++)
1125 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1126 CONST_VECTOR_ELT (op, i),
1127 GET_MODE_INNER (opmode));
1128 if (!x)
1129 return 0;
1130 RTVEC_ELT (v, i) = x;
1132 return gen_rtx_CONST_VECTOR (mode, v);
1135 /* The order of these tests is critical so that, for example, we don't
1136 check the wrong mode (input vs. output) for a conversion operation,
1137 such as FIX. At some point, this should be simplified. */
1139 if (code == FLOAT && GET_MODE (op) == VOIDmode
1140 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1142 HOST_WIDE_INT hv, lv;
1143 REAL_VALUE_TYPE d;
1145 if (CONST_INT_P (op))
1146 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1147 else
1148 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1150 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1151 d = real_value_truncate (mode, d);
1152 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1154 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1155 && (GET_CODE (op) == CONST_DOUBLE
1156 || CONST_INT_P (op)))
1158 HOST_WIDE_INT hv, lv;
1159 REAL_VALUE_TYPE d;
1161 if (CONST_INT_P (op))
1162 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1163 else
1164 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1166 if (op_mode == VOIDmode)
1168 /* We don't know how to interpret negative-looking numbers in
1169 this case, so don't try to fold those. */
1170 if (hv < 0)
1171 return 0;
1173 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1175 else
1176 hv = 0, lv &= GET_MODE_MASK (op_mode);
1178 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1179 d = real_value_truncate (mode, d);
1180 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1183 if (CONST_INT_P (op)
1184 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1186 HOST_WIDE_INT arg0 = INTVAL (op);
1187 HOST_WIDE_INT val;
1189 switch (code)
1191 case NOT:
1192 val = ~ arg0;
1193 break;
1195 case NEG:
1196 val = - arg0;
1197 break;
1199 case ABS:
1200 val = (arg0 >= 0 ? arg0 : - arg0);
1201 break;
1203 case FFS:
1204 /* Don't use ffs here. Instead, get low order bit and then its
1205 number. If arg0 is zero, this will return 0, as desired. */
1206 arg0 &= GET_MODE_MASK (mode);
1207 val = exact_log2 (arg0 & (- arg0)) + 1;
1208 break;
1210 case CLZ:
1211 arg0 &= GET_MODE_MASK (mode);
1212 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1214 else
1215 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1216 break;
1218 case CTZ:
1219 arg0 &= GET_MODE_MASK (mode);
1220 if (arg0 == 0)
1222 /* Even if the value at zero is undefined, we have to come
1223 up with some replacement. Seems good enough. */
1224 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1225 val = GET_MODE_BITSIZE (mode);
1227 else
1228 val = exact_log2 (arg0 & -arg0);
1229 break;
1231 case POPCOUNT:
1232 arg0 &= GET_MODE_MASK (mode);
1233 val = 0;
1234 while (arg0)
1235 val++, arg0 &= arg0 - 1;
1236 break;
1238 case PARITY:
1239 arg0 &= GET_MODE_MASK (mode);
1240 val = 0;
1241 while (arg0)
1242 val++, arg0 &= arg0 - 1;
1243 val &= 1;
1244 break;
1246 case BSWAP:
1248 unsigned int s;
1250 val = 0;
1251 for (s = 0; s < width; s += 8)
1253 unsigned int d = width - s - 8;
1254 unsigned HOST_WIDE_INT byte;
1255 byte = (arg0 >> s) & 0xff;
1256 val |= byte << d;
1259 break;
1261 case TRUNCATE:
1262 val = arg0;
1263 break;
1265 case ZERO_EXTEND:
1266 /* When zero-extending a CONST_INT, we need to know its
1267 original mode. */
1268 gcc_assert (op_mode != VOIDmode);
1269 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1271 /* If we were really extending the mode,
1272 we would have to distinguish between zero-extension
1273 and sign-extension. */
1274 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1275 val = arg0;
1277 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1278 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1279 else
1280 return 0;
1281 break;
1283 case SIGN_EXTEND:
1284 if (op_mode == VOIDmode)
1285 op_mode = mode;
1286 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1288 /* If we were really extending the mode,
1289 we would have to distinguish between zero-extension
1290 and sign-extension. */
1291 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1292 val = arg0;
1294 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1297 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1298 if (val
1299 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1300 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1302 else
1303 return 0;
1304 break;
1306 case SQRT:
1307 case FLOAT_EXTEND:
1308 case FLOAT_TRUNCATE:
1309 case SS_TRUNCATE:
1310 case US_TRUNCATE:
1311 case SS_NEG:
1312 case US_NEG:
1313 case SS_ABS:
1314 return 0;
1316 default:
1317 gcc_unreachable ();
1320 return gen_int_mode (val, mode);
1323 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1324 for a DImode operation on a CONST_INT. */
1325 else if (GET_MODE (op) == VOIDmode
1326 && width <= HOST_BITS_PER_WIDE_INT * 2
1327 && (GET_CODE (op) == CONST_DOUBLE
1328 || CONST_INT_P (op)))
1330 unsigned HOST_WIDE_INT l1, lv;
1331 HOST_WIDE_INT h1, hv;
1333 if (GET_CODE (op) == CONST_DOUBLE)
1334 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1335 else
1336 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1338 switch (code)
1340 case NOT:
1341 lv = ~ l1;
1342 hv = ~ h1;
1343 break;
1345 case NEG:
1346 neg_double (l1, h1, &lv, &hv);
1347 break;
1349 case ABS:
1350 if (h1 < 0)
1351 neg_double (l1, h1, &lv, &hv);
1352 else
1353 lv = l1, hv = h1;
1354 break;
1356 case FFS:
1357 hv = 0;
1358 if (l1 == 0)
1360 if (h1 == 0)
1361 lv = 0;
1362 else
1363 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1365 else
1366 lv = exact_log2 (l1 & -l1) + 1;
1367 break;
1369 case CLZ:
1370 hv = 0;
1371 if (h1 != 0)
1372 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1373 - HOST_BITS_PER_WIDE_INT;
1374 else if (l1 != 0)
1375 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1376 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1377 lv = GET_MODE_BITSIZE (mode);
1378 break;
1380 case CTZ:
1381 hv = 0;
1382 if (l1 != 0)
1383 lv = exact_log2 (l1 & -l1);
1384 else if (h1 != 0)
1385 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1386 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1387 lv = GET_MODE_BITSIZE (mode);
1388 break;
1390 case POPCOUNT:
1391 hv = 0;
1392 lv = 0;
1393 while (l1)
1394 lv++, l1 &= l1 - 1;
1395 while (h1)
1396 lv++, h1 &= h1 - 1;
1397 break;
1399 case PARITY:
1400 hv = 0;
1401 lv = 0;
1402 while (l1)
1403 lv++, l1 &= l1 - 1;
1404 while (h1)
1405 lv++, h1 &= h1 - 1;
1406 lv &= 1;
1407 break;
1409 case BSWAP:
1411 unsigned int s;
1413 hv = 0;
1414 lv = 0;
1415 for (s = 0; s < width; s += 8)
1417 unsigned int d = width - s - 8;
1418 unsigned HOST_WIDE_INT byte;
1420 if (s < HOST_BITS_PER_WIDE_INT)
1421 byte = (l1 >> s) & 0xff;
1422 else
1423 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1425 if (d < HOST_BITS_PER_WIDE_INT)
1426 lv |= byte << d;
1427 else
1428 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1431 break;
1433 case TRUNCATE:
1434 /* This is just a change-of-mode, so do nothing. */
1435 lv = l1, hv = h1;
1436 break;
1438 case ZERO_EXTEND:
1439 gcc_assert (op_mode != VOIDmode);
1441 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1442 return 0;
1444 hv = 0;
1445 lv = l1 & GET_MODE_MASK (op_mode);
1446 break;
1448 case SIGN_EXTEND:
1449 if (op_mode == VOIDmode
1450 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1451 return 0;
1452 else
1454 lv = l1 & GET_MODE_MASK (op_mode);
1455 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1456 && (lv & ((HOST_WIDE_INT) 1
1457 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1458 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1460 hv = HWI_SIGN_EXTEND (lv);
1462 break;
1464 case SQRT:
1465 return 0;
1467 default:
1468 return 0;
1471 return immed_double_const (lv, hv, mode);
1474 else if (GET_CODE (op) == CONST_DOUBLE
1475 && SCALAR_FLOAT_MODE_P (mode))
1477 REAL_VALUE_TYPE d, t;
1478 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1480 switch (code)
1482 case SQRT:
1483 if (HONOR_SNANS (mode) && real_isnan (&d))
1484 return 0;
1485 real_sqrt (&t, mode, &d);
1486 d = t;
1487 break;
1488 case ABS:
1489 d = REAL_VALUE_ABS (d);
1490 break;
1491 case NEG:
1492 d = REAL_VALUE_NEGATE (d);
1493 break;
1494 case FLOAT_TRUNCATE:
1495 d = real_value_truncate (mode, d);
1496 break;
1497 case FLOAT_EXTEND:
1498 /* All this does is change the mode. */
1499 break;
1500 case FIX:
1501 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1502 break;
1503 case NOT:
1505 long tmp[4];
1506 int i;
1508 real_to_target (tmp, &d, GET_MODE (op));
1509 for (i = 0; i < 4; i++)
1510 tmp[i] = ~tmp[i];
1511 real_from_target (&d, tmp, mode);
1512 break;
1514 default:
1515 gcc_unreachable ();
1517 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1520 else if (GET_CODE (op) == CONST_DOUBLE
1521 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1522 && GET_MODE_CLASS (mode) == MODE_INT
1523 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1525 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1526 operators are intentionally left unspecified (to ease implementation
1527 by target backends), for consistency, this routine implements the
1528 same semantics for constant folding as used by the middle-end. */
1530 /* This was formerly used only for non-IEEE float.
1531 eggert@twinsun.com says it is safe for IEEE also. */
1532 HOST_WIDE_INT xh, xl, th, tl;
1533 REAL_VALUE_TYPE x, t;
1534 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1535 switch (code)
1537 case FIX:
1538 if (REAL_VALUE_ISNAN (x))
1539 return const0_rtx;
1541 /* Test against the signed upper bound. */
1542 if (width > HOST_BITS_PER_WIDE_INT)
1544 th = ((unsigned HOST_WIDE_INT) 1
1545 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1546 tl = -1;
1548 else
1550 th = 0;
1551 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1553 real_from_integer (&t, VOIDmode, tl, th, 0);
1554 if (REAL_VALUES_LESS (t, x))
1556 xh = th;
1557 xl = tl;
1558 break;
1561 /* Test against the signed lower bound. */
1562 if (width > HOST_BITS_PER_WIDE_INT)
1564 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1565 tl = 0;
1567 else
1569 th = -1;
1570 tl = (HOST_WIDE_INT) -1 << (width - 1);
1572 real_from_integer (&t, VOIDmode, tl, th, 0);
1573 if (REAL_VALUES_LESS (x, t))
1575 xh = th;
1576 xl = tl;
1577 break;
1579 REAL_VALUE_TO_INT (&xl, &xh, x);
1580 break;
1582 case UNSIGNED_FIX:
1583 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1584 return const0_rtx;
1586 /* Test against the unsigned upper bound. */
1587 if (width == 2*HOST_BITS_PER_WIDE_INT)
1589 th = -1;
1590 tl = -1;
1592 else if (width >= HOST_BITS_PER_WIDE_INT)
1594 th = ((unsigned HOST_WIDE_INT) 1
1595 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1596 tl = -1;
1598 else
1600 th = 0;
1601 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1603 real_from_integer (&t, VOIDmode, tl, th, 1);
1604 if (REAL_VALUES_LESS (t, x))
1606 xh = th;
1607 xl = tl;
1608 break;
1611 REAL_VALUE_TO_INT (&xl, &xh, x);
1612 break;
1614 default:
1615 gcc_unreachable ();
1617 return immed_double_const (xl, xh, mode);
1620 return NULL_RTX;
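/* For instance, simplify_const_unary_operation (NEG, SImode,
   GEN_INT (7), SImode) yields (const_int -7) via the CONST_INT branch
   above, while a non-constant operand falls through to the NULL_RTX
   return.  */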
1623 /* Subroutine of simplify_binary_operation to simplify a commutative,
1624 associative binary operation CODE with result mode MODE, operating
1625 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1626 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1627 canonicalization is possible. */
1629 static rtx
1630 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1631 rtx op0, rtx op1)
1633 rtx tem;
1635 /* Linearize the operator to the left. */
1636 if (GET_CODE (op1) == code)
1638 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1639 if (GET_CODE (op0) == code)
1641 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1642 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1645 /* "a op (b op c)" becomes "(b op c) op a". */
1646 if (! swap_commutative_operands_p (op1, op0))
1647 return simplify_gen_binary (code, mode, op1, op0);
1649 tem = op0;
1650 op0 = op1;
1651 op1 = tem;
1654 if (GET_CODE (op0) == code)
1656 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1657 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1659 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1660 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1663 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1664 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1665 if (tem != 0)
1666 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1668 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1669 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1670 if (tem != 0)
1671 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1674 return 0;
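/* Example of the canonical shape produced here:
   (plus (plus reg_a (const_int 4)) reg_b) is rewritten as
   (plus (plus reg_a reg_b) (const_int 4)), i.e. the constant migrates
   to the outermost right-hand operand.  */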
1678 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1679 and OP1. Return 0 if no simplification is possible.
1681 Don't use this for relational operations such as EQ or LT.
1682 Use simplify_relational_operation instead. */
1684 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1685 rtx op0, rtx op1)
1687 rtx trueop0, trueop1;
1688 rtx tem;
1690 /* Relational operations don't work here. We must know the mode
1691 of the operands in order to do the comparison correctly.
1692 Assuming a full word can give incorrect results.
1693 Consider comparing 128 with -128 in QImode. */
1694 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1695 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1697 /* Make sure the constant is second. */
1698 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1699 && swap_commutative_operands_p (op0, op1))
1701 tem = op0, op0 = op1, op1 = tem;
1704 trueop0 = avoid_constant_pool_reference (op0);
1705 trueop1 = avoid_constant_pool_reference (op1);
1707 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1708 if (tem)
1709 return tem;
1710 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
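/* For example, simplify_binary_operation (PLUS, SImode, GEN_INT (2),
   GEN_INT (3)) folds to (const_int 5) through the constant-folding
   routine; non-constant operands are handed to the worker below, which
   looks for algebraic identities instead.  */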
1713 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1714 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1715 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1716 actual constants. */
1718 static rtx
1719 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1720 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1722 rtx tem, reversed, opleft, opright;
1723 HOST_WIDE_INT val;
1724 unsigned int width = GET_MODE_BITSIZE (mode);
1726 /* Even if we can't compute a constant result,
1727 there are some cases worth simplifying. */
1729 switch (code)
1731 case PLUS:
1732 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1733 when x is NaN, infinite, or finite and nonzero. They aren't
1734 when x is -0 and the rounding mode is not towards -infinity,
1735 since (-0) + 0 is then 0. */
1736 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1737 return op0;
1739 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1740 transformations are safe even for IEEE. */
1741 if (GET_CODE (op0) == NEG)
1742 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1743 else if (GET_CODE (op1) == NEG)
1744 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1746 /* (~a) + 1 -> -a */
1747 if (INTEGRAL_MODE_P (mode)
1748 && GET_CODE (op0) == NOT
1749 && trueop1 == const1_rtx)
1750 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1752 /* Handle both-operands-constant cases. We can only add
1753 CONST_INTs to constants since the sum of relocatable symbols
1754 can't be handled by most assemblers. Don't add CONST_INT
1755 to CONST_INT since overflow won't be computed properly if wider
1756 than HOST_BITS_PER_WIDE_INT. */
1758 if ((GET_CODE (op0) == CONST
1759 || GET_CODE (op0) == SYMBOL_REF
1760 || GET_CODE (op0) == LABEL_REF)
1761 && CONST_INT_P (op1))
1762 return plus_constant (op0, INTVAL (op1));
1763 else if ((GET_CODE (op1) == CONST
1764 || GET_CODE (op1) == SYMBOL_REF
1765 || GET_CODE (op1) == LABEL_REF)
1766 && CONST_INT_P (op0))
1767 return plus_constant (op1, INTVAL (op0));
1769 /* See if this is something like X * C - X or vice versa or
1770 if the multiplication is written as a shift. If so, we can
1771 distribute and make a new multiply, shift, or maybe just
1772 have X (if C is 2 in the example above). But don't make
1773 something more expensive than we had before. */
1775 if (SCALAR_INT_MODE_P (mode))
1777 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1778 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1779 rtx lhs = op0, rhs = op1;
1781 if (GET_CODE (lhs) == NEG)
1783 coeff0l = -1;
1784 coeff0h = -1;
1785 lhs = XEXP (lhs, 0);
1787 else if (GET_CODE (lhs) == MULT
1788 && CONST_INT_P (XEXP (lhs, 1)))
1790 coeff0l = INTVAL (XEXP (lhs, 1));
1791 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1792 lhs = XEXP (lhs, 0);
1794 else if (GET_CODE (lhs) == ASHIFT
1795 && CONST_INT_P (XEXP (lhs, 1))
1796 && INTVAL (XEXP (lhs, 1)) >= 0
1797 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1799 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1800 coeff0h = 0;
1801 lhs = XEXP (lhs, 0);
1804 if (GET_CODE (rhs) == NEG)
1806 coeff1l = -1;
1807 coeff1h = -1;
1808 rhs = XEXP (rhs, 0);
1810 else if (GET_CODE (rhs) == MULT
1811 && CONST_INT_P (XEXP (rhs, 1)))
1813 coeff1l = INTVAL (XEXP (rhs, 1));
1814 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1815 rhs = XEXP (rhs, 0);
1817 else if (GET_CODE (rhs) == ASHIFT
1818 && CONST_INT_P (XEXP (rhs, 1))
1819 && INTVAL (XEXP (rhs, 1)) >= 0
1820 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1822 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1823 coeff1h = 0;
1824 rhs = XEXP (rhs, 0);
1827 if (rtx_equal_p (lhs, rhs))
1829 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1830 rtx coeff;
1831 unsigned HOST_WIDE_INT l;
1832 HOST_WIDE_INT h;
1833 bool speed = optimize_function_for_speed_p (cfun);
1835 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1836 coeff = immed_double_const (l, h, mode);
1838 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1839 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1840 ? tem : 0;
1844 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1845 if ((CONST_INT_P (op1)
1846 || GET_CODE (op1) == CONST_DOUBLE)
1847 && GET_CODE (op0) == XOR
1848 && (CONST_INT_P (XEXP (op0, 1))
1849 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1850 && mode_signbit_p (mode, op1))
1851 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1852 simplify_gen_binary (XOR, mode, op1,
1853 XEXP (op0, 1)));
1855 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1856 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1857 && GET_CODE (op0) == MULT
1858 && GET_CODE (XEXP (op0, 0)) == NEG)
1860 rtx in1, in2;
1862 in1 = XEXP (XEXP (op0, 0), 0);
1863 in2 = XEXP (op0, 1);
1864 return simplify_gen_binary (MINUS, mode, op1,
1865 simplify_gen_binary (MULT, mode,
1866 in1, in2));
1869 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1870 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1871 is 1. */
1872 if (COMPARISON_P (op0)
1873 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1874 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1875 && (reversed = reversed_comparison (op0, mode)))
1876 return
1877 simplify_gen_unary (NEG, mode, reversed, mode);
1879 /* If one of the operands is a PLUS or a MINUS, see if we can
1880 simplify this by the associative law.
1881 Don't use the associative law for floating point.
1882 The inaccuracy makes it nonassociative,
1883 and subtle programs can break if operations are associated. */
1885 if (INTEGRAL_MODE_P (mode)
1886 && (plus_minus_operand_p (op0)
1887 || plus_minus_operand_p (op1))
1888 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1889 return tem;
1891 /* Reassociate floating point addition only when the user
1892 specifies associative math operations. */
1893 if (FLOAT_MODE_P (mode)
1894 && flag_associative_math)
1896 tem = simplify_associative_operation (code, mode, op0, op1);
1897 if (tem)
1898 return tem;
1900 break;
1902 case COMPARE:
1903 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1904 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1905 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1906 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1908 rtx xop00 = XEXP (op0, 0);
1909 rtx xop10 = XEXP (op1, 0);
1911 #ifdef HAVE_cc0
1912 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1913 #else
1914 if (REG_P (xop00) && REG_P (xop10)
1915 && GET_MODE (xop00) == GET_MODE (xop10)
1916 && REGNO (xop00) == REGNO (xop10)
1917 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1918 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1919 #endif
1920 return xop00;
1922 break;
1924 case MINUS:
1925 /* We can't assume x-x is 0 even with non-IEEE floating point,
1926 but since it is zero except in very strange circumstances, we
1927 will treat it as zero with -ffinite-math-only. */
1928 if (rtx_equal_p (trueop0, trueop1)
1929 && ! side_effects_p (op0)
1930 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1931 return CONST0_RTX (mode);
1933 /* Change subtraction from zero into negation. (0 - x) is the
1934 same as -x when x is NaN, infinite, or finite and nonzero.
1935 But if the mode has signed zeros, and does not round towards
1936 -infinity, then 0 - 0 is 0, not -0. */
1937 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1938 return simplify_gen_unary (NEG, mode, op1, mode);
1940 /* (-1 - a) is ~a. */
1941 if (trueop0 == constm1_rtx)
1942 return simplify_gen_unary (NOT, mode, op1, mode);
1944 /* Subtracting 0 has no effect unless the mode has signed zeros
1945 and supports rounding towards -infinity. In such a case,
1946 0 - 0 is -0. */
1947 if (!(HONOR_SIGNED_ZEROS (mode)
1948 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1949 && trueop1 == CONST0_RTX (mode))
1950 return op0;
1952 /* See if this is something like X * C - X or vice versa or
1953 if the multiplication is written as a shift. If so, we can
1954 distribute and make a new multiply, shift, or maybe just
1955 have X (if C is 2 in the example above). But don't make
1956 something more expensive than we had before. */
1958 if (SCALAR_INT_MODE_P (mode))
1960 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1961 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1962 rtx lhs = op0, rhs = op1;
1964 if (GET_CODE (lhs) == NEG)
1966 coeff0l = -1;
1967 coeff0h = -1;
1968 lhs = XEXP (lhs, 0);
1970 else if (GET_CODE (lhs) == MULT
1971 && CONST_INT_P (XEXP (lhs, 1)))
1973 coeff0l = INTVAL (XEXP (lhs, 1));
1974 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1975 lhs = XEXP (lhs, 0);
1977 else if (GET_CODE (lhs) == ASHIFT
1978 && CONST_INT_P (XEXP (lhs, 1))
1979 && INTVAL (XEXP (lhs, 1)) >= 0
1980 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1982 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1983 coeff0h = 0;
1984 lhs = XEXP (lhs, 0);
1987 if (GET_CODE (rhs) == NEG)
1989 negcoeff1l = 1;
1990 negcoeff1h = 0;
1991 rhs = XEXP (rhs, 0);
1993 else if (GET_CODE (rhs) == MULT
1994 && CONST_INT_P (XEXP (rhs, 1)))
1996 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1997 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1998 rhs = XEXP (rhs, 0);
2000 else if (GET_CODE (rhs) == ASHIFT
2001 && CONST_INT_P (XEXP (rhs, 1))
2002 && INTVAL (XEXP (rhs, 1)) >= 0
2003 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2005 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
2006 negcoeff1h = -1;
2007 rhs = XEXP (rhs, 0);
2010 if (rtx_equal_p (lhs, rhs))
2012 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2013 rtx coeff;
2014 unsigned HOST_WIDE_INT l;
2015 HOST_WIDE_INT h;
2016 bool speed = optimize_function_for_speed_p (cfun);
2018 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
2019 coeff = immed_double_const (l, h, mode);
2021 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2022 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2023 ? tem : 0;
2027 /* (a - (-b)) -> (a + b). True even for IEEE. */
2028 if (GET_CODE (op1) == NEG)
2029 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2031 /* (-x - c) may be simplified as (-c - x). */
2032 if (GET_CODE (op0) == NEG
2033 && (CONST_INT_P (op1)
2034 || GET_CODE (op1) == CONST_DOUBLE))
2036 tem = simplify_unary_operation (NEG, mode, op1, mode);
2037 if (tem)
2038 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2041 /* Don't let a relocatable value get a negative coeff. */
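   /* For instance, (minus SYM (const_int 4)), where SYM stands for any
      illustrative relocatable operand, is rewritten as
      (plus SYM (const_int -4)).  */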
2042 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2043 return simplify_gen_binary (PLUS, mode,
2044 op0,
2045 neg_const_int (mode, op1));
2047 /* (x - (x & y)) -> (x & ~y) */
2048 if (GET_CODE (op1) == AND)
2050 if (rtx_equal_p (op0, XEXP (op1, 0)))
2052 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2053 GET_MODE (XEXP (op1, 1)));
2054 return simplify_gen_binary (AND, mode, op0, tem);
2056 if (rtx_equal_p (op0, XEXP (op1, 1)))
2058 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2059 GET_MODE (XEXP (op1, 0)));
2060 return simplify_gen_binary (AND, mode, op0, tem);
2064 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2065 by reversing the comparison code if valid. */
2066 if (STORE_FLAG_VALUE == 1
2067 && trueop0 == const1_rtx
2068 && COMPARISON_P (op1)
2069 && (reversed = reversed_comparison (op1, mode)))
2070 return reversed;
2072 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2073 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2074 && GET_CODE (op1) == MULT
2075 && GET_CODE (XEXP (op1, 0)) == NEG)
2077 rtx in1, in2;
2079 in1 = XEXP (XEXP (op1, 0), 0);
2080 in2 = XEXP (op1, 1);
2081 return simplify_gen_binary (PLUS, mode,
2082 simplify_gen_binary (MULT, mode,
2083 in1, in2),
2084 op0);
2087 /* Canonicalize (minus (neg A) (mult B C)) to
2088 (minus (mult (neg B) C) A). */
2089 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2090 && GET_CODE (op1) == MULT
2091 && GET_CODE (op0) == NEG)
2093 rtx in1, in2;
2095 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2096 in2 = XEXP (op1, 1);
2097 return simplify_gen_binary (MINUS, mode,
2098 simplify_gen_binary (MULT, mode,
2099 in1, in2),
2100 XEXP (op0, 0));
2103 /* If one of the operands is a PLUS or a MINUS, see if we can
2104 simplify this by the associative law. This will, for example,
2105 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2106 Don't use the associative law for floating point.
2107 The inaccuracy makes it nonassociative,
2108 and subtle programs can break if operations are associated. */
2110 if (INTEGRAL_MODE_P (mode)
2111 && (plus_minus_operand_p (op0)
2112 || plus_minus_operand_p (op1))
2113 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2114 return tem;
2115 break;
2117 case MULT:
2118 if (trueop1 == constm1_rtx)
2119 return simplify_gen_unary (NEG, mode, op0, mode);
2121 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2122 x is NaN, since x * 0 is then also NaN. Nor is it valid
2123 when the mode has signed zeros, since multiplying a negative
2124 number by 0 will give -0, not 0. */
2125 if (!HONOR_NANS (mode)
2126 && !HONOR_SIGNED_ZEROS (mode)
2127 && trueop1 == CONST0_RTX (mode)
2128 && ! side_effects_p (op0))
2129 return op1;
2131 /* In IEEE floating point, x*1 is not equivalent to x for
2132 signalling NaNs. */
2133 if (!HONOR_SNANS (mode)
2134 && trueop1 == CONST1_RTX (mode))
2135 return op0;
2137 /* Convert multiply by constant power of two into shift unless
2138 we are still generating RTL. This test is a kludge. */
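   /* e.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */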
2139 if (CONST_INT_P (trueop1)
2140 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2141 /* If the mode is larger than the host word size, and the
2142 uppermost bit is set, then this isn't a power of two due
2143 to implicit sign extension. */
2144 && (width <= HOST_BITS_PER_WIDE_INT
2145 || val != HOST_BITS_PER_WIDE_INT - 1))
2146 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2148 /* Likewise for multipliers wider than a word. */
2149 if (GET_CODE (trueop1) == CONST_DOUBLE
2150 && (GET_MODE (trueop1) == VOIDmode
2151 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2152 && GET_MODE (op0) == mode
2153 && CONST_DOUBLE_LOW (trueop1) == 0
2154 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2155 return simplify_gen_binary (ASHIFT, mode, op0,
2156 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2158 /* x*2 is x+x and x*(-1) is -x */
2159 if (GET_CODE (trueop1) == CONST_DOUBLE
2160 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2161 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2162 && GET_MODE (op0) == mode)
2164 REAL_VALUE_TYPE d;
2165 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2167 if (REAL_VALUES_EQUAL (d, dconst2))
2168 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2170 if (!HONOR_SNANS (mode)
2171 && REAL_VALUES_EQUAL (d, dconstm1))
2172 return simplify_gen_unary (NEG, mode, op0, mode);
2175 /* Optimize -x * -x as x * x. */
2176 if (FLOAT_MODE_P (mode)
2177 && GET_CODE (op0) == NEG
2178 && GET_CODE (op1) == NEG
2179 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2180 && !side_effects_p (XEXP (op0, 0)))
2181 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2183 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2184 if (SCALAR_FLOAT_MODE_P (mode)
2185 && GET_CODE (op0) == ABS
2186 && GET_CODE (op1) == ABS
2187 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2188 && !side_effects_p (XEXP (op0, 0)))
2189 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2191 /* Reassociate multiplication, but for floating point MULTs
2192 only when the user specifies unsafe math optimizations. */
2193 if (! FLOAT_MODE_P (mode)
2194 || flag_unsafe_math_optimizations)
2196 tem = simplify_associative_operation (code, mode, op0, op1);
2197 if (tem)
2198 return tem;
2200 break;
2202 case IOR:
2203 if (trueop1 == const0_rtx)
2204 return op0;
2205 if (CONST_INT_P (trueop1)
2206 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2207 == GET_MODE_MASK (mode)))
2208 return op1;
2209 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2210 return op0;
2211 /* A | (~A) -> -1 */
2212 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2213 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2214 && ! side_effects_p (op0)
2215 && SCALAR_INT_MODE_P (mode))
2216 return constm1_rtx;
2218 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2219 if (CONST_INT_P (op1)
2220 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2222 return op1;
2224 /* Canonicalize (X & C1) | C2. */
2225 if (GET_CODE (op0) == AND
2226 && CONST_INT_P (trueop1)
2227 && CONST_INT_P (XEXP (op0, 1)))
2229 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2230 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2231 HOST_WIDE_INT c2 = INTVAL (trueop1);
2233 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2234 if ((c1 & c2) == c1
2235 && !side_effects_p (XEXP (op0, 0)))
2236 return trueop1;
2238 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2239 if (((c1|c2) & mask) == mask)
2240 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2242 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2243 if (((c1 & ~c2) & mask) != (c1 & mask))
2245 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2246 gen_int_mode (c1 & ~c2, mode));
2247 return simplify_gen_binary (IOR, mode, tem, op1);
2251 /* Convert (A & B) | A to A. */
2252 if (GET_CODE (op0) == AND
2253 && (rtx_equal_p (XEXP (op0, 0), op1)
2254 || rtx_equal_p (XEXP (op0, 1), op1))
2255 && ! side_effects_p (XEXP (op0, 0))
2256 && ! side_effects_p (XEXP (op0, 1)))
2257 return op1;
2259 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2260 mode size to (rotate A CX). */
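   /* For instance, in a 32-bit mode,
      (ior (ashift A (const_int 24)) (lshiftrt A (const_int 8)))
      becomes (rotate A (const_int 24)), because 24 + 8 equals the
      mode bitsize.  */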
2262 if (GET_CODE (op1) == ASHIFT
2263 || GET_CODE (op1) == SUBREG)
2265 opleft = op1;
2266 opright = op0;
2268 else
2270 opright = op1;
2271 opleft = op0;
2274 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2275 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2276 && CONST_INT_P (XEXP (opleft, 1))
2277 && CONST_INT_P (XEXP (opright, 1))
2278 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2279 == GET_MODE_BITSIZE (mode)))
2280 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2282 /* Same, but for ashift that has been "simplified" to a wider mode
2283 by simplify_shift_const. */
2285 if (GET_CODE (opleft) == SUBREG
2286 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2287 && GET_CODE (opright) == LSHIFTRT
2288 && GET_CODE (XEXP (opright, 0)) == SUBREG
2289 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2290 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2291 && (GET_MODE_SIZE (GET_MODE (opleft))
2292 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2293 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2294 SUBREG_REG (XEXP (opright, 0)))
2295 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2296 && CONST_INT_P (XEXP (opright, 1))
2297 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2298 == GET_MODE_BITSIZE (mode)))
2299 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2300 XEXP (SUBREG_REG (opleft), 1));
2302 /* If we have (ior (and X C1) C2), simplify this by making
2303 C1 as small as possible if C1 actually changes. */
2304 if (CONST_INT_P (op1)
2305 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 || INTVAL (op1) > 0)
2307 && GET_CODE (op0) == AND
2308 && CONST_INT_P (XEXP (op0, 1))
2309 && CONST_INT_P (op1)
2310 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2311 return simplify_gen_binary (IOR, mode,
2312 simplify_gen_binary
2313 (AND, mode, XEXP (op0, 0),
2314 GEN_INT (INTVAL (XEXP (op0, 1))
2315 & ~INTVAL (op1))),
2316 op1);
2318 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2319 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2320 the PLUS does not affect any of the bits in OP1: then we can do
2321 the IOR as a PLUS and we can associate. This is valid if OP1
2322 can be safely shifted left C bits. */
2323 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2324 && GET_CODE (XEXP (op0, 0)) == PLUS
2325 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2326 && CONST_INT_P (XEXP (op0, 1))
2327 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2329 int count = INTVAL (XEXP (op0, 1));
2330 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2332 if (mask >> count == INTVAL (trueop1)
2333 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2334 return simplify_gen_binary (ASHIFTRT, mode,
2335 plus_constant (XEXP (op0, 0), mask),
2336 XEXP (op0, 1));
2339 tem = simplify_associative_operation (code, mode, op0, op1);
2340 if (tem)
2341 return tem;
2342 break;
2344 case XOR:
2345 if (trueop1 == const0_rtx)
2346 return op0;
2347 if (CONST_INT_P (trueop1)
2348 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2349 == GET_MODE_MASK (mode)))
2350 return simplify_gen_unary (NOT, mode, op0, mode);
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && GET_MODE_CLASS (mode) != MODE_CC)
2354 return CONST0_RTX (mode);
2356 /* Canonicalize XOR of the most significant bit to PLUS. */
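   /* e.g. in an 8-bit mode, (xor X (const_int -128)) is equivalent to
      (plus X (const_int -128)): adding the sign bit simply flips it,
      since any carry out of the top bit is discarded.  */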
2357 if ((CONST_INT_P (op1)
2358 || GET_CODE (op1) == CONST_DOUBLE)
2359 && mode_signbit_p (mode, op1))
2360 return simplify_gen_binary (PLUS, mode, op0, op1);
2361 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2362 if ((CONST_INT_P (op1)
2363 || GET_CODE (op1) == CONST_DOUBLE)
2364 && GET_CODE (op0) == PLUS
2365 && (CONST_INT_P (XEXP (op0, 1))
2366 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2367 && mode_signbit_p (mode, XEXP (op0, 1)))
2368 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2369 simplify_gen_binary (XOR, mode, op1,
2370 XEXP (op0, 1)));
2372 /* If we are XORing two things that have no bits in common,
2373 convert them into an IOR. This helps to detect rotation encoded
2374 using those methods and possibly other simplifications. */
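   /* For example, (xor (and X (const_int 240)) (and Y (const_int 15)))
      has no nonzero bits shared between its operands, so it is rewritten
      as the equivalent
      (ior (and X (const_int 240)) (and Y (const_int 15))).  */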
2376 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2377 && (nonzero_bits (op0, mode)
2378 & nonzero_bits (op1, mode)) == 0)
2379 return (simplify_gen_binary (IOR, mode, op0, op1));
2381 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2382 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2383 (NOT y). */
2385 int num_negated = 0;
2387 if (GET_CODE (op0) == NOT)
2388 num_negated++, op0 = XEXP (op0, 0);
2389 if (GET_CODE (op1) == NOT)
2390 num_negated++, op1 = XEXP (op1, 0);
2392 if (num_negated == 2)
2393 return simplify_gen_binary (XOR, mode, op0, op1);
2394 else if (num_negated == 1)
2395 return simplify_gen_unary (NOT, mode,
2396 simplify_gen_binary (XOR, mode, op0, op1),
2397 mode);
2400 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2401 correspond to a machine insn or result in further simplifications
2402 if B is a constant. */
2404 if (GET_CODE (op0) == AND
2405 && rtx_equal_p (XEXP (op0, 1), op1)
2406 && ! side_effects_p (op1))
2407 return simplify_gen_binary (AND, mode,
2408 simplify_gen_unary (NOT, mode,
2409 XEXP (op0, 0), mode),
2410 op1);
2412 else if (GET_CODE (op0) == AND
2413 && rtx_equal_p (XEXP (op0, 0), op1)
2414 && ! side_effects_p (op1))
2415 return simplify_gen_binary (AND, mode,
2416 simplify_gen_unary (NOT, mode,
2417 XEXP (op0, 1), mode),
2418 op1);
2420 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2421 comparison if STORE_FLAG_VALUE is 1. */
2422 if (STORE_FLAG_VALUE == 1
2423 && trueop1 == const1_rtx
2424 && COMPARISON_P (op0)
2425 && (reversed = reversed_comparison (op0, mode)))
2426 return reversed;
2428 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2429 is (lt foo (const_int 0)), so we can perform the above
2430 simplification if STORE_FLAG_VALUE is 1. */
2432 if (STORE_FLAG_VALUE == 1
2433 && trueop1 == const1_rtx
2434 && GET_CODE (op0) == LSHIFTRT
2435 && CONST_INT_P (XEXP (op0, 1))
2436 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2437 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2439 /* (xor (comparison foo bar) (const_int sign-bit))
2440 when STORE_FLAG_VALUE is the sign bit. */
2441 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2442 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2443 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2444 && trueop1 == const_true_rtx
2445 && COMPARISON_P (op0)
2446 && (reversed = reversed_comparison (op0, mode)))
2447 return reversed;
2449 tem = simplify_associative_operation (code, mode, op0, op1);
2450 if (tem)
2451 return tem;
2452 break;
2454 case AND:
2455 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2456 return trueop1;
2457 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2459 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2460 HOST_WIDE_INT nzop1;
2461 if (CONST_INT_P (trueop1))
2463 HOST_WIDE_INT val1 = INTVAL (trueop1);
2464 /* If we are turning off bits already known off in OP0, we need
2465 not do an AND. */
2466 if ((nzop0 & ~val1) == 0)
2467 return op0;
2469 nzop1 = nonzero_bits (trueop1, mode);
2470 /* If we are clearing all the nonzero bits, the result is zero. */
2471 if ((nzop1 & nzop0) == 0
2472 && !side_effects_p (op0) && !side_effects_p (op1))
2473 return CONST0_RTX (mode);
2475 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2476 && GET_MODE_CLASS (mode) != MODE_CC)
2477 return op0;
2478 /* A & (~A) -> 0 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && GET_MODE_CLASS (mode) != MODE_CC)
2483 return CONST0_RTX (mode);
2485 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2486 there are no nonzero bits of C outside of X's mode. */
2487 if ((GET_CODE (op0) == SIGN_EXTEND
2488 || GET_CODE (op0) == ZERO_EXTEND)
2489 && CONST_INT_P (trueop1)
2490 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2491 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2492 & INTVAL (trueop1)) == 0)
2494 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2495 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2496 gen_int_mode (INTVAL (trueop1),
2497 imode));
2498 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2501 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2502 we might be able to further simplify the AND with X and potentially
2503 remove the truncation altogether. */
2504 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2506 rtx x = XEXP (op0, 0);
2507 enum machine_mode xmode = GET_MODE (x);
2508 tem = simplify_gen_binary (AND, xmode, x,
2509 gen_int_mode (INTVAL (trueop1), xmode));
2510 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2513 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2514 if (GET_CODE (op0) == IOR
2515 && CONST_INT_P (trueop1)
2516 && CONST_INT_P (XEXP (op0, 1)))
2518 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2519 return simplify_gen_binary (IOR, mode,
2520 simplify_gen_binary (AND, mode,
2521 XEXP (op0, 0), op1),
2522 gen_int_mode (tmp, mode));
2525 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2526 insn (and may simplify more). */
2527 if (GET_CODE (op0) == XOR
2528 && rtx_equal_p (XEXP (op0, 0), op1)
2529 && ! side_effects_p (op1))
2530 return simplify_gen_binary (AND, mode,
2531 simplify_gen_unary (NOT, mode,
2532 XEXP (op0, 1), mode),
2533 op1);
2535 if (GET_CODE (op0) == XOR
2536 && rtx_equal_p (XEXP (op0, 1), op1)
2537 && ! side_effects_p (op1))
2538 return simplify_gen_binary (AND, mode,
2539 simplify_gen_unary (NOT, mode,
2540 XEXP (op0, 0), mode),
2541 op1);
2543 /* Similarly for (~(A ^ B)) & A. */
2544 if (GET_CODE (op0) == NOT
2545 && GET_CODE (XEXP (op0, 0)) == XOR
2546 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2547 && ! side_effects_p (op1))
2548 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2550 if (GET_CODE (op0) == NOT
2551 && GET_CODE (XEXP (op0, 0)) == XOR
2552 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2553 && ! side_effects_p (op1))
2554 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2556 /* Convert (A | B) & A to A. */
2557 if (GET_CODE (op0) == IOR
2558 && (rtx_equal_p (XEXP (op0, 0), op1)
2559 || rtx_equal_p (XEXP (op0, 1), op1))
2560 && ! side_effects_p (XEXP (op0, 0))
2561 && ! side_effects_p (XEXP (op0, 1)))
2562 return op1;
2564 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2565 ((A & N) + B) & M -> (A + B) & M
2566 Similarly if (N & M) == 0,
2567 ((A | N) + B) & M -> (A + B) & M
2568 and for - instead of + and/or ^ instead of |.
2569 Also, if (N & M) == 0, then
2570 (A +- N) & M -> A & M. */
2571 if (CONST_INT_P (trueop1)
2572 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2573 && ~INTVAL (trueop1)
2574 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2575 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2577 rtx pmop[2];
2578 int which;
2580 pmop[0] = XEXP (op0, 0);
2581 pmop[1] = XEXP (op0, 1);
2583 if (CONST_INT_P (pmop[1])
2584 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2585 return simplify_gen_binary (AND, mode, pmop[0], op1);
2587 for (which = 0; which < 2; which++)
2589 tem = pmop[which];
2590 switch (GET_CODE (tem))
2592 case AND:
2593 if (CONST_INT_P (XEXP (tem, 1))
2594 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2595 == INTVAL (trueop1))
2596 pmop[which] = XEXP (tem, 0);
2597 break;
2598 case IOR:
2599 case XOR:
2600 if (CONST_INT_P (XEXP (tem, 1))
2601 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2602 pmop[which] = XEXP (tem, 0);
2603 break;
2604 default:
2605 break;
2609 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2611 tem = simplify_gen_binary (GET_CODE (op0), mode,
2612 pmop[0], pmop[1]);
2613 return simplify_gen_binary (code, mode, tem, op1);
2617 /* (and X (ior (not X) Y)) -> (and X Y) */
2618 if (GET_CODE (op1) == IOR
2619 && GET_CODE (XEXP (op1, 0)) == NOT
2620 && op0 == XEXP (XEXP (op1, 0), 0))
2621 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2623 /* (and (ior (not X) Y) X) -> (and X Y) */
2624 if (GET_CODE (op0) == IOR
2625 && GET_CODE (XEXP (op0, 0)) == NOT
2626 && op1 == XEXP (XEXP (op0, 0), 0))
2627 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2629 tem = simplify_associative_operation (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2632 break;
2634 case UDIV:
2635 /* 0/x is 0 (or x&0 if x has side-effects). */
2636 if (trueop0 == CONST0_RTX (mode))
2638 if (side_effects_p (op1))
2639 return simplify_gen_binary (AND, mode, op1, trueop0);
2640 return trueop0;
2642 /* x/1 is x. */
2643 if (trueop1 == CONST1_RTX (mode))
2644 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2645 /* Convert divide by power of two into shift. */
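   /* e.g. (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */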
2646 if (CONST_INT_P (trueop1)
2647 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2648 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2649 break;
2651 case DIV:
2652 /* Handle floating point and integers separately. */
2653 if (SCALAR_FLOAT_MODE_P (mode))
2655 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2656 safe for modes with NaNs, since 0.0 / 0.0 will then be
2657 NaN rather than 0.0. Nor is it safe for modes with signed
2658 zeros, since dividing 0 by a negative number gives -0.0. */
2659 if (trueop0 == CONST0_RTX (mode)
2660 && !HONOR_NANS (mode)
2661 && !HONOR_SIGNED_ZEROS (mode)
2662 && ! side_effects_p (op1))
2663 return op0;
2664 /* x/1.0 is x. */
2665 if (trueop1 == CONST1_RTX (mode)
2666 && !HONOR_SNANS (mode))
2667 return op0;
2669 if (GET_CODE (trueop1) == CONST_DOUBLE
2670 && trueop1 != CONST0_RTX (mode))
2672 REAL_VALUE_TYPE d;
2673 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2675 /* x/-1.0 is -x. */
2676 if (REAL_VALUES_EQUAL (d, dconstm1)
2677 && !HONOR_SNANS (mode))
2678 return simplify_gen_unary (NEG, mode, op0, mode);
2680 /* Change FP division by a constant into multiplication.
2681 Only do this with -freciprocal-math. */
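   /* e.g. with -freciprocal-math, (div X 2.0) becomes (mult X 0.5).  */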
2682 if (flag_reciprocal_math
2683 && !REAL_VALUES_EQUAL (d, dconst0))
2685 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2686 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2687 return simplify_gen_binary (MULT, mode, op0, tem);
2691 else
2693 /* 0/x is 0 (or x&0 if x has side-effects). */
2694 if (trueop0 == CONST0_RTX (mode))
2696 if (side_effects_p (op1))
2697 return simplify_gen_binary (AND, mode, op1, trueop0);
2698 return trueop0;
2700 /* x/1 is x. */
2701 if (trueop1 == CONST1_RTX (mode))
2702 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2703 /* x/-1 is -x. */
2704 if (trueop1 == constm1_rtx)
2706 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2707 return simplify_gen_unary (NEG, mode, x, mode);
2710 break;
2712 case UMOD:
2713 /* 0%x is 0 (or x&0 if x has side-effects). */
2714 if (trueop0 == CONST0_RTX (mode))
2716 if (side_effects_p (op1))
2717 return simplify_gen_binary (AND, mode, op1, trueop0);
2718 return trueop0;
2720 /* x%1 is 0 (or x&0 if x has side-effects). */
2721 if (trueop1 == CONST1_RTX (mode))
2723 if (side_effects_p (op0))
2724 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2725 return CONST0_RTX (mode);
2727 /* Implement modulus by power of two as AND. */
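   /* e.g. (umod X (const_int 8)) becomes (and X (const_int 7)).  */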
2728 if (CONST_INT_P (trueop1)
2729 && exact_log2 (INTVAL (trueop1)) > 0)
2730 return simplify_gen_binary (AND, mode, op0,
2731 GEN_INT (INTVAL (op1) - 1));
2732 break;
2734 case MOD:
2735 /* 0%x is 0 (or x&0 if x has side-effects). */
2736 if (trueop0 == CONST0_RTX (mode))
2738 if (side_effects_p (op1))
2739 return simplify_gen_binary (AND, mode, op1, trueop0);
2740 return trueop0;
2742 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2743 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2745 if (side_effects_p (op0))
2746 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2747 return CONST0_RTX (mode);
2749 break;
2751 case ROTATERT:
2752 case ROTATE:
2753 case ASHIFTRT:
2754 if (trueop1 == CONST0_RTX (mode))
2755 return op0;
2756 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2757 return op0;
2758 /* Rotating ~0 always results in ~0. */
2759 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2760 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2761 && ! side_effects_p (op1))
2762 return op0;
2763 canonicalize_shift:
2764 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2766 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2767 if (val != INTVAL (op1))
2768 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2770 break;
2772 case ASHIFT:
2773 case SS_ASHIFT:
2774 case US_ASHIFT:
2775 if (trueop1 == CONST0_RTX (mode))
2776 return op0;
2777 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2778 return op0;
2779 goto canonicalize_shift;
2781 case LSHIFTRT:
2782 if (trueop1 == CONST0_RTX (mode))
2783 return op0;
2784 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2785 return op0;
2786 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
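   /* e.g. for a 32-bit operand whose CLZ is defined to be 32 at zero,
      (lshiftrt (clz X) (const_int 5)) is nonzero exactly when X is zero,
      so the whole expression can be rewritten as (eq X (const_int 0)).  */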
2787 if (GET_CODE (op0) == CLZ
2788 && CONST_INT_P (trueop1)
2789 && STORE_FLAG_VALUE == 1
2790 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2792 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2793 unsigned HOST_WIDE_INT zero_val = 0;
2795 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2796 && zero_val == GET_MODE_BITSIZE (imode)
2797 && INTVAL (trueop1) == exact_log2 (zero_val))
2798 return simplify_gen_relational (EQ, mode, imode,
2799 XEXP (op0, 0), const0_rtx);
2801 goto canonicalize_shift;
2803 case SMIN:
2804 if (width <= HOST_BITS_PER_WIDE_INT
2805 && CONST_INT_P (trueop1)
2806 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2807 && ! side_effects_p (op0))
2808 return op1;
2809 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2810 return op0;
2811 tem = simplify_associative_operation (code, mode, op0, op1);
2812 if (tem)
2813 return tem;
2814 break;
2816 case SMAX:
2817 if (width <= HOST_BITS_PER_WIDE_INT
2818 && CONST_INT_P (trueop1)
2819 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2820 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2821 && ! side_effects_p (op0))
2822 return op1;
2823 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2824 return op0;
2825 tem = simplify_associative_operation (code, mode, op0, op1);
2826 if (tem)
2827 return tem;
2828 break;
2830 case UMIN:
2831 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2832 return op1;
2833 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2834 return op0;
2835 tem = simplify_associative_operation (code, mode, op0, op1);
2836 if (tem)
2837 return tem;
2838 break;
2840 case UMAX:
2841 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2842 return op1;
2843 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2844 return op0;
2845 tem = simplify_associative_operation (code, mode, op0, op1);
2846 if (tem)
2847 return tem;
2848 break;
2850 case SS_PLUS:
2851 case US_PLUS:
2852 case SS_MINUS:
2853 case US_MINUS:
2854 case SS_MULT:
2855 case US_MULT:
2856 case SS_DIV:
2857 case US_DIV:
2858 /* ??? There are simplifications that can be done. */
2859 return 0;
2861 case VEC_SELECT:
2862 if (!VECTOR_MODE_P (mode))
2864 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2865 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2866 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2867 gcc_assert (XVECLEN (trueop1, 0) == 1);
2868 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2870 if (GET_CODE (trueop0) == CONST_VECTOR)
2871 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2872 (trueop1, 0, 0)));
2874 /* Extract a scalar element from a nested VEC_SELECT expression
2875 (with optional nested VEC_CONCAT expression). Some targets
2876 (i386) extract a scalar element from a vector using a chain of
2877 nested VEC_SELECT expressions. When the input operand is a memory
2878 operand, this operation can be simplified to a simple scalar
2879 load from an offset memory address. */
2880 if (GET_CODE (trueop0) == VEC_SELECT)
2882 rtx op0 = XEXP (trueop0, 0);
2883 rtx op1 = XEXP (trueop0, 1);
2885 enum machine_mode opmode = GET_MODE (op0);
2886 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2887 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2889 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2890 int elem;
2892 rtvec vec;
2893 rtx tmp_op, tmp;
2895 gcc_assert (GET_CODE (op1) == PARALLEL);
2896 gcc_assert (i < n_elts);
2898 /* Select the element pointed to by the nested selector. */
2899 elem = INTVAL (XVECEXP (op1, 0, i));
2901 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2902 if (GET_CODE (op0) == VEC_CONCAT)
2904 rtx op00 = XEXP (op0, 0);
2905 rtx op01 = XEXP (op0, 1);
2907 enum machine_mode mode00, mode01;
2908 int n_elts00, n_elts01;
2910 mode00 = GET_MODE (op00);
2911 mode01 = GET_MODE (op01);
2913 /* Find out the number of elements in each operand. */
2914 if (VECTOR_MODE_P (mode00))
2916 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2917 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2919 else
2920 n_elts00 = 1;
2922 if (VECTOR_MODE_P (mode01))
2924 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2925 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2927 else
2928 n_elts01 = 1;
2930 gcc_assert (n_elts == n_elts00 + n_elts01);
2932 /* Select the correct operand of VEC_CONCAT
2933 and adjust the selector. */
2934 if (elem < n_elts01)
2935 tmp_op = op00;
2936 else
2938 tmp_op = op01;
2939 elem -= n_elts00;
2942 else
2943 tmp_op = op0;
2945 vec = rtvec_alloc (1);
2946 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2948 tmp = gen_rtx_fmt_ee (code, mode,
2949 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2950 return tmp;
2952 if (GET_CODE (trueop0) == VEC_DUPLICATE
2953 && GET_MODE (XEXP (trueop0, 0)) == mode)
2954 return XEXP (trueop0, 0);
2956 else
2958 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2959 gcc_assert (GET_MODE_INNER (mode)
2960 == GET_MODE_INNER (GET_MODE (trueop0)));
2961 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2963 if (GET_CODE (trueop0) == CONST_VECTOR)
2965 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2966 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2967 rtvec v = rtvec_alloc (n_elts);
2968 unsigned int i;
2970 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2971 for (i = 0; i < n_elts; i++)
2973 rtx x = XVECEXP (trueop1, 0, i);
2975 gcc_assert (CONST_INT_P (x));
2976 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2977 INTVAL (x));
2980 return gen_rtx_CONST_VECTOR (mode, v);
2984 if (XVECLEN (trueop1, 0) == 1
2985 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2986 && GET_CODE (trueop0) == VEC_CONCAT)
2988 rtx vec = trueop0;
2989 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2991 /* Try to find the element in the VEC_CONCAT. */
2992 while (GET_MODE (vec) != mode
2993 && GET_CODE (vec) == VEC_CONCAT)
2995 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2996 if (offset < vec_size)
2997 vec = XEXP (vec, 0);
2998 else
3000 offset -= vec_size;
3001 vec = XEXP (vec, 1);
3003 vec = avoid_constant_pool_reference (vec);
3006 if (GET_MODE (vec) == mode)
3007 return vec;
3010 return 0;
3011 case VEC_CONCAT:
3013 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3014 ? GET_MODE (trueop0)
3015 : GET_MODE_INNER (mode));
3016 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3017 ? GET_MODE (trueop1)
3018 : GET_MODE_INNER (mode));
3020 gcc_assert (VECTOR_MODE_P (mode));
3021 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3022 == GET_MODE_SIZE (mode));
3024 if (VECTOR_MODE_P (op0_mode))
3025 gcc_assert (GET_MODE_INNER (mode)
3026 == GET_MODE_INNER (op0_mode));
3027 else
3028 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3030 if (VECTOR_MODE_P (op1_mode))
3031 gcc_assert (GET_MODE_INNER (mode)
3032 == GET_MODE_INNER (op1_mode));
3033 else
3034 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3036 if ((GET_CODE (trueop0) == CONST_VECTOR
3037 || CONST_INT_P (trueop0)
3038 || GET_CODE (trueop0) == CONST_DOUBLE)
3039 && (GET_CODE (trueop1) == CONST_VECTOR
3040 || CONST_INT_P (trueop1)
3041 || GET_CODE (trueop1) == CONST_DOUBLE))
3043 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3044 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3045 rtvec v = rtvec_alloc (n_elts);
3046 unsigned int i;
3047 unsigned in_n_elts = 1;
3049 if (VECTOR_MODE_P (op0_mode))
3050 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3051 for (i = 0; i < n_elts; i++)
3053 if (i < in_n_elts)
3055 if (!VECTOR_MODE_P (op0_mode))
3056 RTVEC_ELT (v, i) = trueop0;
3057 else
3058 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3060 else
3062 if (!VECTOR_MODE_P (op1_mode))
3063 RTVEC_ELT (v, i) = trueop1;
3064 else
3065 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3066 i - in_n_elts);
3070 return gen_rtx_CONST_VECTOR (mode, v);
3073 return 0;
3075 default:
3076 gcc_unreachable ();
3079 return 0;
3083 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3084 rtx op0, rtx op1)
3086 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3087 HOST_WIDE_INT val;
3088 unsigned int width = GET_MODE_BITSIZE (mode);
3090 if (VECTOR_MODE_P (mode)
3091 && code != VEC_CONCAT
3092 && GET_CODE (op0) == CONST_VECTOR
3093 && GET_CODE (op1) == CONST_VECTOR)
3095 unsigned n_elts = GET_MODE_NUNITS (mode);
3096 enum machine_mode op0mode = GET_MODE (op0);
3097 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3098 enum machine_mode op1mode = GET_MODE (op1);
3099 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3100 rtvec v = rtvec_alloc (n_elts);
3101 unsigned int i;
3103 gcc_assert (op0_n_elts == n_elts);
3104 gcc_assert (op1_n_elts == n_elts);
3105 for (i = 0; i < n_elts; i++)
3107 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3108 CONST_VECTOR_ELT (op0, i),
3109 CONST_VECTOR_ELT (op1, i));
3110 if (!x)
3111 return 0;
3112 RTVEC_ELT (v, i) = x;
3115 return gen_rtx_CONST_VECTOR (mode, v);
3118 if (VECTOR_MODE_P (mode)
3119 && code == VEC_CONCAT
3120 && (CONST_INT_P (op0)
3121 || GET_CODE (op0) == CONST_DOUBLE
3122 || GET_CODE (op0) == CONST_FIXED)
3123 && (CONST_INT_P (op1)
3124 || GET_CODE (op1) == CONST_DOUBLE
3125 || GET_CODE (op1) == CONST_FIXED))
3127 unsigned n_elts = GET_MODE_NUNITS (mode);
3128 rtvec v = rtvec_alloc (n_elts);
3130 gcc_assert (n_elts >= 2);
3131 if (n_elts == 2)
3133 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3134 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3136 RTVEC_ELT (v, 0) = op0;
3137 RTVEC_ELT (v, 1) = op1;
3139 else
3141 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3142 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3143 unsigned i;
3145 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3146 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3147 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3149 for (i = 0; i < op0_n_elts; ++i)
3150 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3151 for (i = 0; i < op1_n_elts; ++i)
3152 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3155 return gen_rtx_CONST_VECTOR (mode, v);
3158 if (SCALAR_FLOAT_MODE_P (mode)
3159 && GET_CODE (op0) == CONST_DOUBLE
3160 && GET_CODE (op1) == CONST_DOUBLE
3161 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3163 if (code == AND
3164 || code == IOR
3165 || code == XOR)
3167 long tmp0[4];
3168 long tmp1[4];
3169 REAL_VALUE_TYPE r;
3170 int i;
3172 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3173 GET_MODE (op0));
3174 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3175 GET_MODE (op1));
3176 for (i = 0; i < 4; i++)
3178 switch (code)
3180 case AND:
3181 tmp0[i] &= tmp1[i];
3182 break;
3183 case IOR:
3184 tmp0[i] |= tmp1[i];
3185 break;
3186 case XOR:
3187 tmp0[i] ^= tmp1[i];
3188 break;
3189 default:
3190 gcc_unreachable ();
3193 real_from_target (&r, tmp0, mode);
3194 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3196 else
3198 REAL_VALUE_TYPE f0, f1, value, result;
3199 bool inexact;
3201 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3202 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3203 real_convert (&f0, mode, &f0);
3204 real_convert (&f1, mode, &f1);
3206 if (HONOR_SNANS (mode)
3207 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3208 return 0;
3210 if (code == DIV
3211 && REAL_VALUES_EQUAL (f1, dconst0)
3212 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3213 return 0;
3215 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3216 && flag_trapping_math
3217 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3219 int s0 = REAL_VALUE_NEGATIVE (f0);
3220 int s1 = REAL_VALUE_NEGATIVE (f1);
3222 switch (code)
3224 case PLUS:
3225 /* Inf + -Inf = NaN plus exception. */
3226 if (s0 != s1)
3227 return 0;
3228 break;
3229 case MINUS:
3230 /* Inf - Inf = NaN plus exception. */
3231 if (s0 == s1)
3232 return 0;
3233 break;
3234 case DIV:
3235 /* Inf / Inf = NaN plus exception. */
3236 return 0;
3237 default:
3238 break;
3242 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3243 && flag_trapping_math
3244 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3245 || (REAL_VALUE_ISINF (f1)
3246 && REAL_VALUES_EQUAL (f0, dconst0))))
3247 /* Inf * 0 = NaN plus exception. */
3248 return 0;
3250 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3251 &f0, &f1);
3252 real_convert (&result, mode, &value);
3254 /* Don't constant fold this floating point operation if
3255 the result has overflowed and flag_trapping_math is set. */
3257 if (flag_trapping_math
3258 && MODE_HAS_INFINITIES (mode)
3259 && REAL_VALUE_ISINF (result)
3260 && !REAL_VALUE_ISINF (f0)
3261 && !REAL_VALUE_ISINF (f1))
3262 /* Overflow plus exception. */
3263 return 0;
3265 /* Don't constant fold this floating point operation if the
3266 result may depend upon the run-time rounding mode and
3267 flag_rounding_math is set, or if GCC's software emulation
3268 is unable to accurately represent the result. */
3270 if ((flag_rounding_math
3271 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3272 && (inexact || !real_identical (&result, &value)))
3273 return NULL_RTX;
3275 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3279 /* We can fold some multi-word operations. */
3280 if (GET_MODE_CLASS (mode) == MODE_INT
3281 && width == HOST_BITS_PER_WIDE_INT * 2
3282 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3283 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3285 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3286 HOST_WIDE_INT h1, h2, hv, ht;
3288 if (GET_CODE (op0) == CONST_DOUBLE)
3289 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3290 else
3291 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3293 if (GET_CODE (op1) == CONST_DOUBLE)
3294 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3295 else
3296 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3298 switch (code)
3300 case MINUS:
3301 /* A - B == A + (-B). */
3302 neg_double (l2, h2, &lv, &hv);
3303 l2 = lv, h2 = hv;
3305 /* Fall through.... */
3307 case PLUS:
3308 add_double (l1, h1, l2, h2, &lv, &hv);
3309 break;
3311 case MULT:
3312 mul_double (l1, h1, l2, h2, &lv, &hv);
3313 break;
3315 case DIV:
3316 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3317 &lv, &hv, &lt, &ht))
3318 return 0;
3319 break;
3321 case MOD:
3322 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3323 &lt, &ht, &lv, &hv))
3324 return 0;
3325 break;
3327 case UDIV:
3328 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3329 &lv, &hv, &lt, &ht))
3330 return 0;
3331 break;
3333 case UMOD:
3334 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3335 &lt, &ht, &lv, &hv))
3336 return 0;
3337 break;
3339 case AND:
3340 lv = l1 & l2, hv = h1 & h2;
3341 break;
3343 case IOR:
3344 lv = l1 | l2, hv = h1 | h2;
3345 break;
3347 case XOR:
3348 lv = l1 ^ l2, hv = h1 ^ h2;
3349 break;
3351 case SMIN:
3352 if (h1 < h2
3353 || (h1 == h2
3354 && ((unsigned HOST_WIDE_INT) l1
3355 < (unsigned HOST_WIDE_INT) l2)))
3356 lv = l1, hv = h1;
3357 else
3358 lv = l2, hv = h2;
3359 break;
3361 case SMAX:
3362 if (h1 > h2
3363 || (h1 == h2
3364 && ((unsigned HOST_WIDE_INT) l1
3365 > (unsigned HOST_WIDE_INT) l2)))
3366 lv = l1, hv = h1;
3367 else
3368 lv = l2, hv = h2;
3369 break;
3371 case UMIN:
3372 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3373 || (h1 == h2
3374 && ((unsigned HOST_WIDE_INT) l1
3375 < (unsigned HOST_WIDE_INT) l2)))
3376 lv = l1, hv = h1;
3377 else
3378 lv = l2, hv = h2;
3379 break;
3381 case UMAX:
3382 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3383 || (h1 == h2
3384 && ((unsigned HOST_WIDE_INT) l1
3385 > (unsigned HOST_WIDE_INT) l2)))
3386 lv = l1, hv = h1;
3387 else
3388 lv = l2, hv = h2;
3389 break;
3391 case LSHIFTRT: case ASHIFTRT:
3392 case ASHIFT:
3393 case ROTATE: case ROTATERT:
3394 if (SHIFT_COUNT_TRUNCATED)
3395 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3397 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3398 return 0;
3400 if (code == LSHIFTRT || code == ASHIFTRT)
3401 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3402 code == ASHIFTRT);
3403 else if (code == ASHIFT)
3404 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3405 else if (code == ROTATE)
3406 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3407 else /* code == ROTATERT */
3408 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3409 break;
3411 default:
3412 return 0;
3415 return immed_double_const (lv, hv, mode);
3418 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3419 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3421 /* Get the integer argument values in two forms:
3422 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
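   /* e.g. in an 8-bit mode the bit pattern 0xff reads as 255 in
      ARG0/ARG1 but as -1 in ARG0S/ARG1S.  */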
3424 arg0 = INTVAL (op0);
3425 arg1 = INTVAL (op1);
3427 if (width < HOST_BITS_PER_WIDE_INT)
3429 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3430 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3432 arg0s = arg0;
3433 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3434 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3436 arg1s = arg1;
3437 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3438 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3440 else
3442 arg0s = arg0;
3443 arg1s = arg1;
3446 /* Compute the value of the arithmetic. */
3448 switch (code)
3450 case PLUS:
3451 val = arg0s + arg1s;
3452 break;
3454 case MINUS:
3455 val = arg0s - arg1s;
3456 break;
3458 case MULT:
3459 val = arg0s * arg1s;
3460 break;
3462 case DIV:
3463 if (arg1s == 0
3464 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3465 && arg1s == -1))
3466 return 0;
3467 val = arg0s / arg1s;
3468 break;
3470 case MOD:
3471 if (arg1s == 0
3472 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3473 && arg1s == -1))
3474 return 0;
3475 val = arg0s % arg1s;
3476 break;
3478 case UDIV:
3479 if (arg1 == 0
3480 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3481 && arg1s == -1))
3482 return 0;
3483 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3484 break;
3486 case UMOD:
3487 if (arg1 == 0
3488 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3489 && arg1s == -1))
3490 return 0;
3491 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3492 break;
3494 case AND:
3495 val = arg0 & arg1;
3496 break;
3498 case IOR:
3499 val = arg0 | arg1;
3500 break;
3502 case XOR:
3503 val = arg0 ^ arg1;
3504 break;
3506 case LSHIFTRT:
3507 case ASHIFT:
3508 case ASHIFTRT:
3509 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3510 the value is in range. We can't return any old value for
3511 out-of-range arguments because either the middle-end (via
3512 shift_truncation_mask) or the back-end might be relying on
3513 target-specific knowledge. Nor can we rely on
3514 shift_truncation_mask, since the shift might not be part of an
3515 ashlM3, lshrM3 or ashrM3 instruction. */
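   /* e.g. when SHIFT_COUNT_TRUNCATED holds, a 32-bit shift by 33 is
      folded as a shift by 33 % 32 == 1; otherwise such an out-of-range
      shift is not folded at all.  */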
3516 if (SHIFT_COUNT_TRUNCATED)
3517 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3518 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3519 return 0;
3521 val = (code == ASHIFT
3522 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3523 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3525 /* Sign-extend the result for arithmetic right shifts. */
3526 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3527 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3528 break;
3530 case ROTATERT:
3531 if (arg1 < 0)
3532 return 0;
3534 arg1 %= width;
3535 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3536 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3537 break;
3539 case ROTATE:
3540 if (arg1 < 0)
3541 return 0;
3543 arg1 %= width;
3544 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3545 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3546 break;
3548 case COMPARE:
3549 /* Do nothing here. */
3550 return 0;
3552 case SMIN:
3553 val = arg0s <= arg1s ? arg0s : arg1s;
3554 break;
3556 case UMIN:
3557 val = ((unsigned HOST_WIDE_INT) arg0
3558 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3559 break;
3561 case SMAX:
3562 val = arg0s > arg1s ? arg0s : arg1s;
3563 break;
3565 case UMAX:
3566 val = ((unsigned HOST_WIDE_INT) arg0
3567 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3568 break;
3570 case SS_PLUS:
3571 case US_PLUS:
3572 case SS_MINUS:
3573 case US_MINUS:
3574 case SS_MULT:
3575 case US_MULT:
3576 case SS_DIV:
3577 case US_DIV:
3578 case SS_ASHIFT:
3579 case US_ASHIFT:
3580 /* ??? There are simplifications that can be done. */
3581 return 0;
3583 default:
3584 gcc_unreachable ();
3587 return gen_int_mode (val, mode);
3590 return NULL_RTX;
3595 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3596 PLUS or MINUS.
3598 Rather than test for specific cases, we do this by a brute-force method
3599 and do all possible simplifications until no more changes occur. Then
3600 we rebuild the operation. */
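/* For example (a rough sketch of the mechanism): given
   (plus (minus A B) B), the operands are collected as A, -B and B;
   the pairwise pass cancels B against -B, and the whole expression
   reduces to just A.  */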
3602 struct simplify_plus_minus_op_data
3604 rtx op;
3605 short neg;
3608 static bool
3609 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3611 int result;
3613 result = (commutative_operand_precedence (y)
3614 - commutative_operand_precedence (x));
3615 if (result)
3616 return result > 0;
3618 /* Group together equal REGs to do more simplification. */
3619 if (REG_P (x) && REG_P (y))
3620 return REGNO (x) > REGNO (y);
3621 else
3622 return false;
3625 static rtx
3626 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3627 rtx op1)
3629 struct simplify_plus_minus_op_data ops[8];
3630 rtx result, tem;
3631 int n_ops = 2, input_ops = 2;
3632 int changed, n_constants = 0, canonicalized = 0;
3633 int i, j;
3635 memset (ops, 0, sizeof ops);
3637 /* Set up the two operands and then expand them until nothing has been
3638 changed. If we run out of room in our array, give up; this should
3639 almost never happen. */
3641 ops[0].op = op0;
3642 ops[0].neg = 0;
3643 ops[1].op = op1;
3644 ops[1].neg = (code == MINUS);
3648 changed = 0;
3650 for (i = 0; i < n_ops; i++)
3652 rtx this_op = ops[i].op;
3653 int this_neg = ops[i].neg;
3654 enum rtx_code this_code = GET_CODE (this_op);
3656 switch (this_code)
3658 case PLUS:
3659 case MINUS:
3660 if (n_ops == 7)
3661 return NULL_RTX;
3663 ops[n_ops].op = XEXP (this_op, 1);
3664 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3665 n_ops++;
3667 ops[i].op = XEXP (this_op, 0);
3668 input_ops++;
3669 changed = 1;
3670 canonicalized |= this_neg;
3671 break;
3673 case NEG:
3674 ops[i].op = XEXP (this_op, 0);
3675 ops[i].neg = ! this_neg;
3676 changed = 1;
3677 canonicalized = 1;
3678 break;
3680 case CONST:
3681 if (n_ops < 7
3682 && GET_CODE (XEXP (this_op, 0)) == PLUS
3683 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3684 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3686 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3687 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3688 ops[n_ops].neg = this_neg;
3689 n_ops++;
3690 changed = 1;
3691 canonicalized = 1;
3693 break;
3695 case NOT:
3696 /* ~a -> (-a - 1) */
3697 if (n_ops != 7)
3699 ops[n_ops].op = constm1_rtx;
3700 ops[n_ops++].neg = this_neg;
3701 ops[i].op = XEXP (this_op, 0);
3702 ops[i].neg = !this_neg;
3703 changed = 1;
3704 canonicalized = 1;
3706 break;
3708 case CONST_INT:
3709 n_constants++;
3710 if (this_neg)
3712 ops[i].op = neg_const_int (mode, this_op);
3713 ops[i].neg = 0;
3714 changed = 1;
3715 canonicalized = 1;
3717 break;
3719 default:
3720 break;
3724 while (changed);
3726 if (n_constants > 1)
3727 canonicalized = 1;
3729 gcc_assert (n_ops >= 2);
3731 /* If we only have two operands, we can avoid the loops. */
3732 if (n_ops == 2)
3734 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3735 rtx lhs, rhs;
3737 /* Get the two operands. Be careful with the order, especially for
3738 the cases where code == MINUS. */
3739 if (ops[0].neg && ops[1].neg)
3741 lhs = gen_rtx_NEG (mode, ops[0].op);
3742 rhs = ops[1].op;
3744 else if (ops[0].neg)
3746 lhs = ops[1].op;
3747 rhs = ops[0].op;
3749 else
3751 lhs = ops[0].op;
3752 rhs = ops[1].op;
3755 return simplify_const_binary_operation (code, mode, lhs, rhs);
3758 /* Now simplify each pair of operands until nothing changes. */
3761 /* Insertion sort is good enough for an eight-element array. */
3762 for (i = 1; i < n_ops; i++)
3764 struct simplify_plus_minus_op_data save;
3765 j = i - 1;
3766 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3767 continue;
3769 canonicalized = 1;
3770 save = ops[i];
3772 ops[j + 1] = ops[j];
3773 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3774 ops[j + 1] = save;
3777 changed = 0;
3778 for (i = n_ops - 1; i > 0; i--)
3779 for (j = i - 1; j >= 0; j--)
3781 rtx lhs = ops[j].op, rhs = ops[i].op;
3782 int lneg = ops[j].neg, rneg = ops[i].neg;
3784 if (lhs != 0 && rhs != 0)
3786 enum rtx_code ncode = PLUS;
3788 if (lneg != rneg)
3790 ncode = MINUS;
3791 if (lneg)
3792 tem = lhs, lhs = rhs, rhs = tem;
3794 else if (swap_commutative_operands_p (lhs, rhs))
3795 tem = lhs, lhs = rhs, rhs = tem;
3797 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3798 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3800 rtx tem_lhs, tem_rhs;
3802 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3803 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3804 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3806 if (tem && !CONSTANT_P (tem))
3807 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3809 else
3810 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3812 /* Reject "simplifications" that just wrap the two
3813 arguments in a CONST. Failure to do so can result
3814 in infinite recursion with simplify_binary_operation
3815 when it calls us to simplify CONST operations. */
3816 if (tem
3817 && ! (GET_CODE (tem) == CONST
3818 && GET_CODE (XEXP (tem, 0)) == ncode
3819 && XEXP (XEXP (tem, 0), 0) == lhs
3820 && XEXP (XEXP (tem, 0), 1) == rhs))
3822 lneg &= rneg;
3823 if (GET_CODE (tem) == NEG)
3824 tem = XEXP (tem, 0), lneg = !lneg;
3825 if (CONST_INT_P (tem) && lneg)
3826 tem = neg_const_int (mode, tem), lneg = 0;
3828 ops[i].op = tem;
3829 ops[i].neg = lneg;
3830 ops[j].op = NULL_RTX;
3831 changed = 1;
3832 canonicalized = 1;
3837 /* If nothing changed, fail. */
3838 if (!canonicalized)
3839 return NULL_RTX;
3841 /* Pack all the operands to the lower-numbered entries. */
3842 for (i = 0, j = 0; j < n_ops; j++)
3843 if (ops[j].op)
3845 ops[i] = ops[j];
3846 i++;
3848 n_ops = i;
3850 while (changed);
3852 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3853 if (n_ops == 2
3854 && CONST_INT_P (ops[1].op)
3855 && CONSTANT_P (ops[0].op)
3856 && ops[0].neg)
3857 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3859 /* We suppressed creation of trivial CONST expressions in the
3860 combination loop to avoid recursion. Create one manually now.
3861 The combination loop should have ensured that there is exactly
3862 one CONST_INT, and the sort will have ensured that it is last
3863 in the array and that any other constant will be next-to-last. */
3865 if (n_ops > 1
3866 && CONST_INT_P (ops[n_ops - 1].op)
3867 && CONSTANT_P (ops[n_ops - 2].op))
3869 rtx value = ops[n_ops - 1].op;
3870 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3871 value = neg_const_int (mode, value);
3872 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3873 n_ops--;
3876 /* Put a non-negated operand first, if possible. */
3878 for (i = 0; i < n_ops && ops[i].neg; i++)
3879 continue;
3880 if (i == n_ops)
3881 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3882 else if (i != 0)
3884 tem = ops[0].op;
3885 ops[0] = ops[i];
3886 ops[i].op = tem;
3887 ops[i].neg = 1;
3890 /* Now make the result by performing the requested operations. */
3891 result = ops[0].op;
3892 for (i = 1; i < n_ops; i++)
3893 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3894 mode, result, ops[i].op);
3896 return result;
3899 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3900 static bool
3901 plus_minus_operand_p (const_rtx x)
3903 return GET_CODE (x) == PLUS
3904 || GET_CODE (x) == MINUS
3905 || (GET_CODE (x) == CONST
3906 && GET_CODE (XEXP (x, 0)) == PLUS
3907 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3908 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3911 /* Like simplify_binary_operation except used for relational operators.
3912 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3913 not both be VOIDmode as well.
3915 CMP_MODE specifies the mode in which the comparison is done, so it is
3916 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3917 the operands or, if both are VOIDmode, the operands are compared in
3918 "infinite precision". */
3920 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3921 enum machine_mode cmp_mode, rtx op0, rtx op1)
3923 rtx tem, trueop0, trueop1;
3925 if (cmp_mode == VOIDmode)
3926 cmp_mode = GET_MODE (op0);
3927 if (cmp_mode == VOIDmode)
3928 cmp_mode = GET_MODE (op1);
3930 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3931 if (tem)
3933 if (SCALAR_FLOAT_MODE_P (mode))
3935 if (tem == const0_rtx)
3936 return CONST0_RTX (mode);
3937 #ifdef FLOAT_STORE_FLAG_VALUE
3939 REAL_VALUE_TYPE val;
3940 val = FLOAT_STORE_FLAG_VALUE (mode);
3941 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3943 #else
3944 return NULL_RTX;
3945 #endif
3947 if (VECTOR_MODE_P (mode))
3949 if (tem == const0_rtx)
3950 return CONST0_RTX (mode);
3951 #ifdef VECTOR_STORE_FLAG_VALUE
3953 int i, units;
3954 rtvec v;
3956 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3957 if (val == NULL_RTX)
3958 return NULL_RTX;
3959 if (val == const1_rtx)
3960 return CONST1_RTX (mode);
3962 units = GET_MODE_NUNITS (mode);
3963 v = rtvec_alloc (units);
3964 for (i = 0; i < units; i++)
3965 RTVEC_ELT (v, i) = val;
3966 return gen_rtx_raw_CONST_VECTOR (mode, v);
3968 #else
3969 return NULL_RTX;
3970 #endif
3973 return tem;
3976 /* For the following tests, ensure const0_rtx is op1. */
3977 if (swap_commutative_operands_p (op0, op1)
3978 || (op0 == const0_rtx && op1 != const0_rtx))
3979 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3981 /* If op0 is a compare, extract the comparison arguments from it. */
3982 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3983 return simplify_gen_relational (code, mode, VOIDmode,
3984 XEXP (op0, 0), XEXP (op0, 1));
3986 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3987 || CC0_P (op0))
3988 return NULL_RTX;
3990 trueop0 = avoid_constant_pool_reference (op0);
3991 trueop1 = avoid_constant_pool_reference (op1);
3992 return simplify_relational_operation_1 (code, mode, cmp_mode,
3993 trueop0, trueop1);
3996 /* This part of simplify_relational_operation is only used when CMP_MODE
3997 is not in class MODE_CC (i.e. it is a real comparison).
3999 MODE is the mode of the result, while CMP_MODE specifies the mode
4000 in which the comparison is done, so it is the mode of the operands. */
4002 static rtx
4003 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4004 enum machine_mode cmp_mode, rtx op0, rtx op1)
4006 enum rtx_code op0code = GET_CODE (op0);
4008 if (op1 == const0_rtx && COMPARISON_P (op0))
4010 /* If op0 is a comparison, extract the comparison arguments
4011 from it. */
4012 if (code == NE)
4014 if (GET_MODE (op0) == mode)
4015 return simplify_rtx (op0);
4016 else
4017 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4018 XEXP (op0, 0), XEXP (op0, 1));
4020 else if (code == EQ)
4022 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4023 if (new_code != UNKNOWN)
4024 return simplify_gen_relational (new_code, mode, VOIDmode,
4025 XEXP (op0, 0), XEXP (op0, 1));
4029 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4030 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4031 if ((code == LTU || code == GEU)
4032 && GET_CODE (op0) == PLUS
4033 && CONST_INT_P (XEXP (op0, 1))
4034 && (rtx_equal_p (op1, XEXP (op0, 0))
4035 || rtx_equal_p (op1, XEXP (op0, 1))))
4037 rtx new_cmp
4038 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4039 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4040 cmp_mode, XEXP (op0, 0), new_cmp);
4043 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4044 if ((code == LTU || code == GEU)
4045 && GET_CODE (op0) == PLUS
4046 && rtx_equal_p (op1, XEXP (op0, 1))
4047 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4048 && !rtx_equal_p (op1, XEXP (op0, 0)))
4049 return simplify_gen_relational (code, mode, cmp_mode, op0,
4050 copy_rtx (XEXP (op0, 0)));
4052 if (op1 == const0_rtx)
4054 /* Canonicalize (GTU x 0) as (NE x 0). */
4055 if (code == GTU)
4056 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4057 /* Canonicalize (LEU x 0) as (EQ x 0). */
4058 if (code == LEU)
4059 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4061 else if (op1 == const1_rtx)
4063 switch (code)
4065 case GE:
4066 /* Canonicalize (GE x 1) as (GT x 0). */
4067 return simplify_gen_relational (GT, mode, cmp_mode,
4068 op0, const0_rtx);
4069 case GEU:
4070 /* Canonicalize (GEU x 1) as (NE x 0). */
4071 return simplify_gen_relational (NE, mode, cmp_mode,
4072 op0, const0_rtx);
4073 case LT:
4074 /* Canonicalize (LT x 1) as (LE x 0). */
4075 return simplify_gen_relational (LE, mode, cmp_mode,
4076 op0, const0_rtx);
4077 case LTU:
4078 /* Canonicalize (LTU x 1) as (EQ x 0). */
4079 return simplify_gen_relational (EQ, mode, cmp_mode,
4080 op0, const0_rtx);
4081 default:
4082 break;
4085 else if (op1 == constm1_rtx)
4087 /* Canonicalize (LE x -1) as (LT x 0). */
4088 if (code == LE)
4089 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4090 /* Canonicalize (GT x -1) as (GE x 0). */
4091 if (code == GT)
4092 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4095 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4096 if ((code == EQ || code == NE)
4097 && (op0code == PLUS || op0code == MINUS)
4098 && CONSTANT_P (op1)
4099 && CONSTANT_P (XEXP (op0, 1))
4100 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4102 rtx x = XEXP (op0, 0);
4103 rtx c = XEXP (op0, 1);
4105 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4106 cmp_mode, op1, c);
4107 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4110 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4111 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4112 if (code == NE
4113 && op1 == const0_rtx
4114 && GET_MODE_CLASS (mode) == MODE_INT
4115 && cmp_mode != VOIDmode
4116 /* ??? Work-around BImode bugs in the ia64 backend. */
4117 && mode != BImode
4118 && cmp_mode != BImode
4119 && nonzero_bits (op0, cmp_mode) == 1
4120 && STORE_FLAG_VALUE == 1)
4121 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4122 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4123 : lowpart_subreg (mode, op0, cmp_mode);
4125 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4126 if ((code == EQ || code == NE)
4127 && op1 == const0_rtx
4128 && op0code == XOR)
4129 return simplify_gen_relational (code, mode, cmp_mode,
4130 XEXP (op0, 0), XEXP (op0, 1));
4132 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4133 if ((code == EQ || code == NE)
4134 && op0code == XOR
4135 && rtx_equal_p (XEXP (op0, 0), op1)
4136 && !side_effects_p (XEXP (op0, 0)))
4137 return simplify_gen_relational (code, mode, cmp_mode,
4138 XEXP (op0, 1), const0_rtx);
4140 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4141 if ((code == EQ || code == NE)
4142 && op0code == XOR
4143 && rtx_equal_p (XEXP (op0, 1), op1)
4144 && !side_effects_p (XEXP (op0, 1)))
4145 return simplify_gen_relational (code, mode, cmp_mode,
4146 XEXP (op0, 0), const0_rtx);
4148 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4149 if ((code == EQ || code == NE)
4150 && op0code == XOR
4151 && (CONST_INT_P (op1)
4152 || GET_CODE (op1) == CONST_DOUBLE)
4153 && (CONST_INT_P (XEXP (op0, 1))
4154 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4155 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4156 simplify_gen_binary (XOR, cmp_mode,
4157 XEXP (op0, 1), op1));
4159 if (op0code == POPCOUNT && op1 == const0_rtx)
4160 switch (code)
4162 case EQ:
4163 case LE:
4164 case LEU:
4165 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4166 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4167 XEXP (op0, 0), const0_rtx);
4169 case NE:
4170 case GT:
4171 case GTU:
4172 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4173 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4174 XEXP (op0, 0), const0_rtx);
4176 default:
4177 break;
4180 return NULL_RTX;
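/* A minimal sketch of the canonicalizations above (the pseudo register
   number is hypothetical):

     rtx x = gen_rtx_REG (SImode, 100);
     rtx y = simplify_gen_relational (GTU, SImode, SImode, x, const0_rtx);

   Here Y comes back as (ne:SI (reg:SI 100) (const_int 0)) rather than as
   a GTU test against zero.  */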
4183 enum
4185 CMP_EQ = 1,
4186 CMP_LT = 2,
4187 CMP_GT = 4,
4188 CMP_LTU = 8,
4189 CMP_GTU = 16
4193 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4194 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4195 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4196 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4197 For floating-point comparisons, assume that the operands were ordered. */
4199 static rtx
4200 comparison_result (enum rtx_code code, int known_results)
4202 switch (code)
4204 case EQ:
4205 case UNEQ:
4206 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4207 case NE:
4208 case LTGT:
4209 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4211 case LT:
4212 case UNLT:
4213 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4214 case GE:
4215 case UNGE:
4216 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4218 case GT:
4219 case UNGT:
4220 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4221 case LE:
4222 case UNLE:
4223 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4225 case LTU:
4226 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4227 case GEU:
4228 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4230 case GTU:
4231 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4232 case LEU:
4233 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4235 case ORDERED:
4236 return const_true_rtx;
4237 case UNORDERED:
4238 return const0_rtx;
4239 default:
4240 gcc_unreachable ();
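/* For illustration, a KNOWN_RESULTS value of (CMP_LT | CMP_LTU) describes
   a pair of operands known to compare "less than" both signed and
   unsigned, so for example

     comparison_result (LT,  CMP_LT | CMP_LTU)  == const_true_rtx
     comparison_result (GEU, CMP_LT | CMP_LTU)  == const0_rtx
     comparison_result (EQ,  CMP_LT | CMP_LTU)  == const0_rtx  */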
4244 /* Check if the given comparison (done in the given MODE) is actually a
4245 tautology or a contradiction.
4246 If no simplification is possible, this function returns zero.
4247 Otherwise, it returns either const_true_rtx or const0_rtx. */
4250 simplify_const_relational_operation (enum rtx_code code,
4251 enum machine_mode mode,
4252 rtx op0, rtx op1)
4254 rtx tem;
4255 rtx trueop0;
4256 rtx trueop1;
4258 gcc_assert (mode != VOIDmode
4259 || (GET_MODE (op0) == VOIDmode
4260 && GET_MODE (op1) == VOIDmode));
4262 /* If op0 is a compare, extract the comparison arguments from it. */
4263 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4265 op1 = XEXP (op0, 1);
4266 op0 = XEXP (op0, 0);
4268 if (GET_MODE (op0) != VOIDmode)
4269 mode = GET_MODE (op0);
4270 else if (GET_MODE (op1) != VOIDmode)
4271 mode = GET_MODE (op1);
4272 else
4273 return 0;
4276 /* We can't simplify MODE_CC values since we don't know what the
4277 actual comparison is. */
4278 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4279 return 0;
4281 /* Make sure the constant is second. */
4282 if (swap_commutative_operands_p (op0, op1))
4284 tem = op0, op0 = op1, op1 = tem;
4285 code = swap_condition (code);
4288 trueop0 = avoid_constant_pool_reference (op0);
4289 trueop1 = avoid_constant_pool_reference (op1);
4291 /* For integer comparisons of A and B maybe we can simplify A - B and can
4292 then simplify a comparison of that with zero. If A and B are both either
4293 a register or a CONST_INT, this can't help; testing for these cases will
4294 prevent infinite recursion here and speed things up.
4296 We can only do this for EQ and NE comparisons as otherwise we may
4297 lose or introduce overflow which we cannot disregard as undefined as
4298 we do not know the signedness of the operation on either the left or
4299 the right hand side of the comparison. */
4301 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4302 && (code == EQ || code == NE)
4303 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4304 && (REG_P (op1) || CONST_INT_P (trueop1)))
4305 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4306 /* We cannot do this if tem is a nonzero address. */
4307 && ! nonzero_address_p (tem))
4308 return simplify_const_relational_operation (signed_condition (code),
4309 mode, tem, const0_rtx);
4311 if (! HONOR_NANS (mode) && code == ORDERED)
4312 return const_true_rtx;
4314 if (! HONOR_NANS (mode) && code == UNORDERED)
4315 return const0_rtx;
4317 /* For modes without NaNs, if the two operands are equal, we know the
4318 result except if they have side-effects. Even with NaNs we know
4319 the result of unordered comparisons and, if signaling NaNs are
4320 irrelevant, also the result of LT/GT/LTGT. */
4321 if ((! HONOR_NANS (GET_MODE (trueop0))
4322 || code == UNEQ || code == UNLE || code == UNGE
4323 || ((code == LT || code == GT || code == LTGT)
4324 && ! HONOR_SNANS (GET_MODE (trueop0))))
4325 && rtx_equal_p (trueop0, trueop1)
4326 && ! side_effects_p (trueop0))
4327 return comparison_result (code, CMP_EQ);
4329 /* If the operands are floating-point constants, see if we can fold
4330 the result. */
4331 if (GET_CODE (trueop0) == CONST_DOUBLE
4332 && GET_CODE (trueop1) == CONST_DOUBLE
4333 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4335 REAL_VALUE_TYPE d0, d1;
4337 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4338 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4340 /* Comparisons are unordered iff at least one of the values is NaN. */
4341 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4342 switch (code)
4344 case UNEQ:
4345 case UNLT:
4346 case UNGT:
4347 case UNLE:
4348 case UNGE:
4349 case NE:
4350 case UNORDERED:
4351 return const_true_rtx;
4352 case EQ:
4353 case LT:
4354 case GT:
4355 case LE:
4356 case GE:
4357 case LTGT:
4358 case ORDERED:
4359 return const0_rtx;
4360 default:
4361 return 0;
4364 return comparison_result (code,
4365 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4366 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4369 /* Otherwise, see if the operands are both integers. */
4370 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4371 && (GET_CODE (trueop0) == CONST_DOUBLE
4372 || CONST_INT_P (trueop0))
4373 && (GET_CODE (trueop1) == CONST_DOUBLE
4374 || CONST_INT_P (trueop1)))
4376 int width = GET_MODE_BITSIZE (mode);
4377 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4378 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4380 /* Get the two words comprising each integer constant. */
4381 if (GET_CODE (trueop0) == CONST_DOUBLE)
4383 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4384 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4386 else
4388 l0u = l0s = INTVAL (trueop0);
4389 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4392 if (GET_CODE (trueop1) == CONST_DOUBLE)
4394 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4395 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4397 else
4399 l1u = l1s = INTVAL (trueop1);
4400 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4403 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4404 we have to sign or zero-extend the values. */
4405 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4407 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4408 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4410 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4411 l0s |= ((HOST_WIDE_INT) (-1) << width);
4413 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4414 l1s |= ((HOST_WIDE_INT) (-1) << width);
4416 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4417 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4419 if (h0u == h1u && l0u == l1u)
4420 return comparison_result (code, CMP_EQ);
4421 else
4423 int cr;
4424 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4425 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4426 return comparison_result (code, cr);
4430 /* Optimize comparisons with upper and lower bounds. */
4431 if (SCALAR_INT_MODE_P (mode)
4432 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4433 && CONST_INT_P (trueop1))
4435 int sign;
4436 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4437 HOST_WIDE_INT val = INTVAL (trueop1);
4438 HOST_WIDE_INT mmin, mmax;
4440 if (code == GEU
4441 || code == LEU
4442 || code == GTU
4443 || code == LTU)
4444 sign = 0;
4445 else
4446 sign = 1;
4448 /* Get a reduced range if the sign bit is zero. */
4449 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4451 mmin = 0;
4452 mmax = nonzero;
4454 else
4456 rtx mmin_rtx, mmax_rtx;
4457 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4459 mmin = INTVAL (mmin_rtx);
4460 mmax = INTVAL (mmax_rtx);
4461 if (sign)
4463 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4465 mmin >>= (sign_copies - 1);
4466 mmax >>= (sign_copies - 1);
4470 switch (code)
4472 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4473 case GEU:
4474 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4475 return const_true_rtx;
4476 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4477 return const0_rtx;
4478 break;
4479 case GE:
4480 if (val <= mmin)
4481 return const_true_rtx;
4482 if (val > mmax)
4483 return const0_rtx;
4484 break;
4486 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4487 case LEU:
4488 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4489 return const_true_rtx;
4490 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4491 return const0_rtx;
4492 break;
4493 case LE:
4494 if (val >= mmax)
4495 return const_true_rtx;
4496 if (val < mmin)
4497 return const0_rtx;
4498 break;
4500 case EQ:
4501 /* x == y is always false for y out of range. */
4502 if (val < mmin || val > mmax)
4503 return const0_rtx;
4504 break;
4506 /* x > y is always false for y >= mmax, always true for y < mmin. */
4507 case GTU:
4508 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4509 return const0_rtx;
4510 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4511 return const_true_rtx;
4512 break;
4513 case GT:
4514 if (val >= mmax)
4515 return const0_rtx;
4516 if (val < mmin)
4517 return const_true_rtx;
4518 break;
4520 /* x < y is always false for y <= mmin, always true for y > mmax. */
4521 case LTU:
4522 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4523 return const0_rtx;
4524 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4525 return const_true_rtx;
4526 break;
4527 case LT:
4528 if (val <= mmin)
4529 return const0_rtx;
4530 if (val > mmax)
4531 return const_true_rtx;
4532 break;
4534 case NE:
4535 /* x != y is always true for y out of range. */
4536 if (val < mmin || val > mmax)
4537 return const_true_rtx;
4538 break;
4540 default:
4541 break;
4545 /* Optimize integer comparisons with zero. */
4546 if (trueop1 == const0_rtx)
4548 /* Some addresses are known to be nonzero. We don't know
4549 their sign, but equality comparisons are known. */
4550 if (nonzero_address_p (trueop0))
4552 if (code == EQ || code == LEU)
4553 return const0_rtx;
4554 if (code == NE || code == GTU)
4555 return const_true_rtx;
4558 /* See if the first operand is an IOR with a constant. If so, we
4559 may be able to determine the result of this comparison. */
4560 if (GET_CODE (op0) == IOR)
4562 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4563 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4565 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4566 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4567 && (INTVAL (inner_const)
4568 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4570 switch (code)
4572 case EQ:
4573 case LEU:
4574 return const0_rtx;
4575 case NE:
4576 case GTU:
4577 return const_true_rtx;
4578 case LT:
4579 case LE:
4580 if (has_sign)
4581 return const_true_rtx;
4582 break;
4583 case GT:
4584 case GE:
4585 if (has_sign)
4586 return const0_rtx;
4587 break;
4588 default:
4589 break;
4595 /* Optimize comparison of ABS with zero. */
4596 if (trueop1 == CONST0_RTX (mode)
4597 && (GET_CODE (trueop0) == ABS
4598 || (GET_CODE (trueop0) == FLOAT_EXTEND
4599 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4601 switch (code)
4603 case LT:
4604 /* Optimize abs(x) < 0.0. */
4605 if (!HONOR_SNANS (mode)
4606 && (!INTEGRAL_MODE_P (mode)
4607 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4609 if (INTEGRAL_MODE_P (mode)
4610 && (issue_strict_overflow_warning
4611 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4612 warning (OPT_Wstrict_overflow,
4613 ("assuming signed overflow does not occur when "
4614 "assuming abs (x) < 0 is false"));
4615 return const0_rtx;
4617 break;
4619 case GE:
4620 /* Optimize abs(x) >= 0.0. */
4621 if (!HONOR_NANS (mode)
4622 && (!INTEGRAL_MODE_P (mode)
4623 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4625 if (INTEGRAL_MODE_P (mode)
4626 && (issue_strict_overflow_warning
4627 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4628 warning (OPT_Wstrict_overflow,
4629 ("assuming signed overflow does not occur when "
4630 "assuming abs (x) >= 0 is true"));
4631 return const_true_rtx;
4633 break;
4635 case UNGE:
4636 /* Optimize ! (abs(x) < 0.0). */
4637 return const_true_rtx;
4639 default:
4640 break;
4644 return 0;
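/* A minimal usage sketch (constants only, SImode assumed):

     simplify_const_relational_operation (EQ, SImode,
                                          GEN_INT (4), GEN_INT (4));

   returns const_true_rtx, the same comparison of distinct constants
   returns const0_rtx, and a comparison this routine cannot decide
   returns 0 so the caller can keep the original rtx.  */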
4647 /* Simplify CODE, an operation with result mode MODE and three operands,
4648 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4649 a constant.  Return 0 if no simplification is possible. */
4652 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4653 enum machine_mode op0_mode, rtx op0, rtx op1,
4654 rtx op2)
4656 unsigned int width = GET_MODE_BITSIZE (mode);
4658 /* VOIDmode means "infinite" precision. */
4659 if (width == 0)
4660 width = HOST_BITS_PER_WIDE_INT;
4662 switch (code)
4664 case SIGN_EXTRACT:
4665 case ZERO_EXTRACT:
4666 if (CONST_INT_P (op0)
4667 && CONST_INT_P (op1)
4668 && CONST_INT_P (op2)
4669 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4670 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4672 /* Extracting a bit-field from a constant */
4673 HOST_WIDE_INT val = INTVAL (op0);
4675 if (BITS_BIG_ENDIAN)
4676 val >>= (GET_MODE_BITSIZE (op0_mode)
4677 - INTVAL (op2) - INTVAL (op1));
4678 else
4679 val >>= INTVAL (op2);
4681 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4683 /* First zero-extend. */
4684 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4685 /* If desired, propagate sign bit. */
4686 if (code == SIGN_EXTRACT
4687 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4688 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4691 /* Clear the bits that don't belong in our mode,
4692 unless they and our sign bit are all one.
4693 So we get either a reasonable negative value or a reasonable
4694 unsigned value for this mode. */
4695 if (width < HOST_BITS_PER_WIDE_INT
4696 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4697 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4698 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4700 return gen_int_mode (val, mode);
4702 break;
4704 case IF_THEN_ELSE:
4705 if (CONST_INT_P (op0))
4706 return op0 != const0_rtx ? op1 : op2;
4708 /* Convert c ? a : a into "a". */
4709 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4710 return op1;
4712 /* Convert a != b ? a : b into "a". */
4713 if (GET_CODE (op0) == NE
4714 && ! side_effects_p (op0)
4715 && ! HONOR_NANS (mode)
4716 && ! HONOR_SIGNED_ZEROS (mode)
4717 && ((rtx_equal_p (XEXP (op0, 0), op1)
4718 && rtx_equal_p (XEXP (op0, 1), op2))
4719 || (rtx_equal_p (XEXP (op0, 0), op2)
4720 && rtx_equal_p (XEXP (op0, 1), op1))))
4721 return op1;
4723 /* Convert a == b ? a : b into "b". */
4724 if (GET_CODE (op0) == EQ
4725 && ! side_effects_p (op0)
4726 && ! HONOR_NANS (mode)
4727 && ! HONOR_SIGNED_ZEROS (mode)
4728 && ((rtx_equal_p (XEXP (op0, 0), op1)
4729 && rtx_equal_p (XEXP (op0, 1), op2))
4730 || (rtx_equal_p (XEXP (op0, 0), op2)
4731 && rtx_equal_p (XEXP (op0, 1), op1))))
4732 return op2;
4734 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4736 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4737 ? GET_MODE (XEXP (op0, 1))
4738 : GET_MODE (XEXP (op0, 0)));
4739 rtx temp;
4741 /* Look for happy constants in op1 and op2. */
4742 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4744 HOST_WIDE_INT t = INTVAL (op1);
4745 HOST_WIDE_INT f = INTVAL (op2);
4747 if (t == STORE_FLAG_VALUE && f == 0)
4748 code = GET_CODE (op0);
4749 else if (t == 0 && f == STORE_FLAG_VALUE)
4751 enum rtx_code tmp;
4752 tmp = reversed_comparison_code (op0, NULL_RTX);
4753 if (tmp == UNKNOWN)
4754 break;
4755 code = tmp;
4757 else
4758 break;
4760 return simplify_gen_relational (code, mode, cmp_mode,
4761 XEXP (op0, 0), XEXP (op0, 1));
4764 if (cmp_mode == VOIDmode)
4765 cmp_mode = op0_mode;
4766 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4767 cmp_mode, XEXP (op0, 0),
4768 XEXP (op0, 1));
4770 /* See if any simplifications were possible. */
4771 if (temp)
4773 if (CONST_INT_P (temp))
4774 return temp == const0_rtx ? op2 : op1;
4775 else
4776 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4779 break;
4781 case VEC_MERGE:
4782 gcc_assert (GET_MODE (op0) == mode);
4783 gcc_assert (GET_MODE (op1) == mode);
4784 gcc_assert (VECTOR_MODE_P (mode));
4785 op2 = avoid_constant_pool_reference (op2);
4786 if (CONST_INT_P (op2))
4788 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4789 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4790 int mask = (1 << n_elts) - 1;
4792 if (!(INTVAL (op2) & mask))
4793 return op1;
4794 if ((INTVAL (op2) & mask) == mask)
4795 return op0;
4797 op0 = avoid_constant_pool_reference (op0);
4798 op1 = avoid_constant_pool_reference (op1);
4799 if (GET_CODE (op0) == CONST_VECTOR
4800 && GET_CODE (op1) == CONST_VECTOR)
4802 rtvec v = rtvec_alloc (n_elts);
4803 unsigned int i;
4805 for (i = 0; i < n_elts; i++)
4806 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4807 ? CONST_VECTOR_ELT (op0, i)
4808 : CONST_VECTOR_ELT (op1, i));
4809 return gen_rtx_CONST_VECTOR (mode, v);
4812 break;
4814 default:
4815 gcc_unreachable ();
4818 return 0;
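/* A minimal usage sketch (the pseudo register numbers are hypothetical):

     rtx a = gen_rtx_REG (SImode, 101);
     rtx b = gen_rtx_REG (SImode, 102);
     rtx r = simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                         const1_rtx, a, b);

   Since the condition is a nonzero constant, R is simply A; passing
   const0_rtx as the condition would yield B instead.  */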
4821 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4822 or CONST_VECTOR,
4823 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4825 Works by unpacking OP into a collection of 8-bit values
4826 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4827 and then repacking them again for OUTERMODE. */
4829 static rtx
4830 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4831 enum machine_mode innermode, unsigned int byte)
4833 /* We support up to 512-bit values (for V8DFmode). */
4834 enum {
4835 max_bitsize = 512,
4836 value_bit = 8,
4837 value_mask = (1 << value_bit) - 1
4839 unsigned char value[max_bitsize / value_bit];
4840 int value_start;
4841 int i;
4842 int elem;
4844 int num_elem;
4845 rtx * elems;
4846 int elem_bitsize;
4847 rtx result_s;
4848 rtvec result_v = NULL;
4849 enum mode_class outer_class;
4850 enum machine_mode outer_submode;
4852 /* Some ports misuse CCmode. */
4853 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4854 return op;
4856 /* We have no way to represent a complex constant at the rtl level. */
4857 if (COMPLEX_MODE_P (outermode))
4858 return NULL_RTX;
4860 /* Unpack the value. */
4862 if (GET_CODE (op) == CONST_VECTOR)
4864 num_elem = CONST_VECTOR_NUNITS (op);
4865 elems = &CONST_VECTOR_ELT (op, 0);
4866 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4868 else
4870 num_elem = 1;
4871 elems = &op;
4872 elem_bitsize = max_bitsize;
4874 /* If this asserts, it is too complicated; reducing value_bit may help. */
4875 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4876 /* I don't know how to handle endianness of sub-units. */
4877 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4879 for (elem = 0; elem < num_elem; elem++)
4881 unsigned char * vp;
4882 rtx el = elems[elem];
4884 /* Vectors are kept in target memory order. (This is probably
4885 a mistake.) */
4887 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4888 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4889 / BITS_PER_UNIT);
4890 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4891 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4892 unsigned bytele = (subword_byte % UNITS_PER_WORD
4893 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4894 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4897 switch (GET_CODE (el))
4899 case CONST_INT:
4900 for (i = 0;
4901 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4902 i += value_bit)
4903 *vp++ = INTVAL (el) >> i;
4904 /* CONST_INTs are always logically sign-extended. */
4905 for (; i < elem_bitsize; i += value_bit)
4906 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4907 break;
4909 case CONST_DOUBLE:
4910 if (GET_MODE (el) == VOIDmode)
4912 /* If this triggers, someone should have generated a
4913 CONST_INT instead. */
4914 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4916 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4917 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4918 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4920 *vp++
4921 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4922 i += value_bit;
4924 /* It shouldn't matter what's done here, so fill it with
4925 zero. */
4926 for (; i < elem_bitsize; i += value_bit)
4927 *vp++ = 0;
4929 else
4931 long tmp[max_bitsize / 32];
4932 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4934 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4935 gcc_assert (bitsize <= elem_bitsize);
4936 gcc_assert (bitsize % value_bit == 0);
4938 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4939 GET_MODE (el));
4941 /* real_to_target produces its result in words affected by
4942 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4943 and use WORDS_BIG_ENDIAN instead; see the documentation
4944 of SUBREG in rtl.texi. */
4945 for (i = 0; i < bitsize; i += value_bit)
4947 int ibase;
4948 if (WORDS_BIG_ENDIAN)
4949 ibase = bitsize - 1 - i;
4950 else
4951 ibase = i;
4952 *vp++ = tmp[ibase / 32] >> i % 32;
4955 /* It shouldn't matter what's done here, so fill it with
4956 zero. */
4957 for (; i < elem_bitsize; i += value_bit)
4958 *vp++ = 0;
4960 break;
4962 case CONST_FIXED:
4963 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4965 for (i = 0; i < elem_bitsize; i += value_bit)
4966 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4968 else
4970 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4971 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4972 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4973 i += value_bit)
4974 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4975 >> (i - HOST_BITS_PER_WIDE_INT);
4976 for (; i < elem_bitsize; i += value_bit)
4977 *vp++ = 0;
4979 break;
4981 default:
4982 gcc_unreachable ();
4986 /* Now, pick the right byte to start with. */
4987 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4988 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4989 will already have offset 0. */
4990 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4992 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4993 - byte);
4994 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4995 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4996 byte = (subword_byte % UNITS_PER_WORD
4997 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5000 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5001 so if it's become negative it will instead be very large.) */
5002 gcc_assert (byte < GET_MODE_SIZE (innermode));
5004 /* Convert from bytes to chunks of size value_bit. */
5005 value_start = byte * (BITS_PER_UNIT / value_bit);
5007 /* Re-pack the value. */
5009 if (VECTOR_MODE_P (outermode))
5011 num_elem = GET_MODE_NUNITS (outermode);
5012 result_v = rtvec_alloc (num_elem);
5013 elems = &RTVEC_ELT (result_v, 0);
5014 outer_submode = GET_MODE_INNER (outermode);
5016 else
5018 num_elem = 1;
5019 elems = &result_s;
5020 outer_submode = outermode;
5023 outer_class = GET_MODE_CLASS (outer_submode);
5024 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5026 gcc_assert (elem_bitsize % value_bit == 0);
5027 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5029 for (elem = 0; elem < num_elem; elem++)
5031 unsigned char *vp;
5033 /* Vectors are stored in target memory order. (This is probably
5034 a mistake.) */
5036 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5037 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5038 / BITS_PER_UNIT);
5039 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5040 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5041 unsigned bytele = (subword_byte % UNITS_PER_WORD
5042 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5043 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5046 switch (outer_class)
5048 case MODE_INT:
5049 case MODE_PARTIAL_INT:
5051 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5053 for (i = 0;
5054 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5055 i += value_bit)
5056 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5057 for (; i < elem_bitsize; i += value_bit)
5058 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5059 << (i - HOST_BITS_PER_WIDE_INT));
5061 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5062 know why. */
5063 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5064 elems[elem] = gen_int_mode (lo, outer_submode);
5065 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5066 elems[elem] = immed_double_const (lo, hi, outer_submode);
5067 else
5068 return NULL_RTX;
5070 break;
5072 case MODE_FLOAT:
5073 case MODE_DECIMAL_FLOAT:
5075 REAL_VALUE_TYPE r;
5076 long tmp[max_bitsize / 32];
5078 /* real_from_target wants its input in words affected by
5079 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5080 and use WORDS_BIG_ENDIAN instead; see the documentation
5081 of SUBREG in rtl.texi. */
5082 for (i = 0; i < max_bitsize / 32; i++)
5083 tmp[i] = 0;
5084 for (i = 0; i < elem_bitsize; i += value_bit)
5086 int ibase;
5087 if (WORDS_BIG_ENDIAN)
5088 ibase = elem_bitsize - 1 - i;
5089 else
5090 ibase = i;
5091 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5094 real_from_target (&r, tmp, outer_submode);
5095 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5097 break;
5099 case MODE_FRACT:
5100 case MODE_UFRACT:
5101 case MODE_ACCUM:
5102 case MODE_UACCUM:
5104 FIXED_VALUE_TYPE f;
5105 f.data.low = 0;
5106 f.data.high = 0;
5107 f.mode = outer_submode;
5109 for (i = 0;
5110 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5111 i += value_bit)
5112 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5113 for (; i < elem_bitsize; i += value_bit)
5114 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5115 << (i - HOST_BITS_PER_WIDE_INT));
5117 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5119 break;
5121 default:
5122 gcc_unreachable ();
5125 if (VECTOR_MODE_P (outermode))
5126 return gen_rtx_CONST_VECTOR (outermode, result_v);
5127 else
5128 return result_s;
5131 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5132 Return 0 if no simplifications are possible. */
5134 simplify_subreg (enum machine_mode outermode, rtx op,
5135 enum machine_mode innermode, unsigned int byte)
5137 /* Little bit of sanity checking. */
5138 gcc_assert (innermode != VOIDmode);
5139 gcc_assert (outermode != VOIDmode);
5140 gcc_assert (innermode != BLKmode);
5141 gcc_assert (outermode != BLKmode);
5143 gcc_assert (GET_MODE (op) == innermode
5144 || GET_MODE (op) == VOIDmode);
5146 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5147 gcc_assert (byte < GET_MODE_SIZE (innermode));
5149 if (outermode == innermode && !byte)
5150 return op;
5152 if (CONST_INT_P (op)
5153 || GET_CODE (op) == CONST_DOUBLE
5154 || GET_CODE (op) == CONST_FIXED
5155 || GET_CODE (op) == CONST_VECTOR)
5156 return simplify_immed_subreg (outermode, op, innermode, byte);
5158 /* Changing mode twice with SUBREG => just change it once,
5159 or not at all if changing back to OP's starting mode. */
5160 if (GET_CODE (op) == SUBREG)
5162 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5163 int final_offset = byte + SUBREG_BYTE (op);
5164 rtx newx;
5166 if (outermode == innermostmode
5167 && byte == 0 && SUBREG_BYTE (op) == 0)
5168 return SUBREG_REG (op);
5170 /* The SUBREG_BYTE represents the offset, as if the value were stored
5171 in memory.  The irritating exception is a paradoxical subreg, where
5172 we define SUBREG_BYTE to be 0; on big-endian machines this value
5173 would otherwise be negative.  For a moment, undo this exception. */
5174 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5176 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5177 if (WORDS_BIG_ENDIAN)
5178 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5179 if (BYTES_BIG_ENDIAN)
5180 final_offset += difference % UNITS_PER_WORD;
5182 if (SUBREG_BYTE (op) == 0
5183 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5185 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5186 if (WORDS_BIG_ENDIAN)
5187 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5188 if (BYTES_BIG_ENDIAN)
5189 final_offset += difference % UNITS_PER_WORD;
5192 /* See whether resulting subreg will be paradoxical. */
5193 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5195 /* In nonparadoxical subregs we can't handle negative offsets. */
5196 if (final_offset < 0)
5197 return NULL_RTX;
5198 /* Bail out in case resulting subreg would be incorrect. */
5199 if (final_offset % GET_MODE_SIZE (outermode)
5200 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5201 return NULL_RTX;
5203 else
5205 int offset = 0;
5206 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5208 /* In a paradoxical subreg, see if we are still looking at the lower
5209 part.  If so, our SUBREG_BYTE will be 0. */
5210 if (WORDS_BIG_ENDIAN)
5211 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5212 if (BYTES_BIG_ENDIAN)
5213 offset += difference % UNITS_PER_WORD;
5214 if (offset == final_offset)
5215 final_offset = 0;
5216 else
5217 return NULL_RTX;
5220 /* Recurse for further possible simplifications. */
5221 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5222 final_offset);
5223 if (newx)
5224 return newx;
5225 if (validate_subreg (outermode, innermostmode,
5226 SUBREG_REG (op), final_offset))
5228 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5229 if (SUBREG_PROMOTED_VAR_P (op)
5230 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5231 && GET_MODE_CLASS (outermode) == MODE_INT
5232 && IN_RANGE (GET_MODE_SIZE (outermode),
5233 GET_MODE_SIZE (innermode),
5234 GET_MODE_SIZE (innermostmode))
5235 && subreg_lowpart_p (newx))
5237 SUBREG_PROMOTED_VAR_P (newx) = 1;
5238 SUBREG_PROMOTED_UNSIGNED_SET
5239 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5241 return newx;
5243 return NULL_RTX;
5246 /* Merge implicit and explicit truncations. */
5248 if (GET_CODE (op) == TRUNCATE
5249 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5250 && subreg_lowpart_offset (outermode, innermode) == byte)
5251 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5252 GET_MODE (XEXP (op, 0)));
5254 /* SUBREG of a hard register => just change the register number
5255 and/or mode. If the hard register is not valid in that mode,
5256 suppress this simplification. If the hard register is the stack,
5257 frame, or argument pointer, leave this as a SUBREG. */
5259 if (REG_P (op) && HARD_REGISTER_P (op))
5261 unsigned int regno, final_regno;
5263 regno = REGNO (op);
5264 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5265 if (HARD_REGISTER_NUM_P (final_regno))
5267 rtx x;
5268 int final_offset = byte;
5270 /* Adjust offset for paradoxical subregs. */
5271 if (byte == 0
5272 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5274 int difference = (GET_MODE_SIZE (innermode)
5275 - GET_MODE_SIZE (outermode));
5276 if (WORDS_BIG_ENDIAN)
5277 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5278 if (BYTES_BIG_ENDIAN)
5279 final_offset += difference % UNITS_PER_WORD;
5282 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5284 /* Propagate the original regno.  We don't have any way to specify
5285 the offset inside the original regno, so do so only for the lowpart.
5286 The information is used only by alias analysis, which cannot
5287 grok a partial register anyway. */
5289 if (subreg_lowpart_offset (outermode, innermode) == byte)
5290 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5291 return x;
5295 /* If we have a SUBREG of a register that we are replacing and we are
5296 replacing it with a MEM, make a new MEM and try replacing the
5297 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5298 or if we would be widening it. */
5300 if (MEM_P (op)
5301 && ! mode_dependent_address_p (XEXP (op, 0))
5302 /* Allow splitting of volatile memory references in case we don't
5303 have instruction to move the whole thing. */
5304 && (! MEM_VOLATILE_P (op)
5305 || ! have_insn_for (SET, innermode))
5306 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5307 return adjust_address_nv (op, outermode, byte);
5309 /* Handle complex values represented as CONCAT
5310 of real and imaginary part. */
5311 if (GET_CODE (op) == CONCAT)
5313 unsigned int part_size, final_offset;
5314 rtx part, res;
5316 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5317 if (byte < part_size)
5319 part = XEXP (op, 0);
5320 final_offset = byte;
5322 else
5324 part = XEXP (op, 1);
5325 final_offset = byte - part_size;
5328 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5329 return NULL_RTX;
5331 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5332 if (res)
5333 return res;
5334 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5335 return gen_rtx_SUBREG (outermode, part, final_offset);
5336 return NULL_RTX;
5339 /* Optimize SUBREG truncations of zero and sign extended values. */
5340 if ((GET_CODE (op) == ZERO_EXTEND
5341 || GET_CODE (op) == SIGN_EXTEND)
5342 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5344 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5346 /* If we're requesting the lowpart of a zero or sign extension,
5347 there are three possibilities. If the outermode is the same
5348 as the origmode, we can omit both the extension and the subreg.
5349 If the outermode is not larger than the origmode, we can apply
5350 the truncation without the extension. Finally, if the outermode
5351 is larger than the origmode, but both are integer modes, we
5352 can just extend to the appropriate mode. */
5353 if (bitpos == 0)
5355 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5356 if (outermode == origmode)
5357 return XEXP (op, 0);
5358 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5359 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5360 subreg_lowpart_offset (outermode,
5361 origmode));
5362 if (SCALAR_INT_MODE_P (outermode))
5363 return simplify_gen_unary (GET_CODE (op), outermode,
5364 XEXP (op, 0), origmode);
5367 /* A SUBREG resulting from a zero extension may fold to zero if
5368 it extracts bits higher than the ZERO_EXTEND's source bits. */
5369 if (GET_CODE (op) == ZERO_EXTEND
5370 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5371 return CONST0_RTX (outermode);
5374 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5375 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5376 the outer subreg is effectively a truncation to the original mode. */
5377 if ((GET_CODE (op) == LSHIFTRT
5378 || GET_CODE (op) == ASHIFTRT)
5379 && SCALAR_INT_MODE_P (outermode)
5380 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5381 to avoid the possibility that an outer LSHIFTRT shifts by more
5382 than the sign extension's sign_bit_copies and introduces zeros
5383 into the high bits of the result. */
5384 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5385 && CONST_INT_P (XEXP (op, 1))
5386 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5387 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5388 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5389 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5390 return simplify_gen_binary (ASHIFTRT, outermode,
5391 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5393 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5394 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5395 the outer subreg is effectively a truncation to the original mode. */
5396 if ((GET_CODE (op) == LSHIFTRT
5397 || GET_CODE (op) == ASHIFTRT)
5398 && SCALAR_INT_MODE_P (outermode)
5399 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5400 && CONST_INT_P (XEXP (op, 1))
5401 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5402 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5403 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5404 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5405 return simplify_gen_binary (LSHIFTRT, outermode,
5406 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5408 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5409 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5410 the outer subreg is effectively a truncation to the original mode. */
5411 if (GET_CODE (op) == ASHIFT
5412 && SCALAR_INT_MODE_P (outermode)
5413 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5414 && CONST_INT_P (XEXP (op, 1))
5415 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5416 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5417 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5418 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5419 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5420 return simplify_gen_binary (ASHIFT, outermode,
5421 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5423 /* Recognize a word extraction from a multi-word subreg. */
5424 if ((GET_CODE (op) == LSHIFTRT
5425 || GET_CODE (op) == ASHIFTRT)
5426 && SCALAR_INT_MODE_P (outermode)
5427 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5428 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5429 && CONST_INT_P (XEXP (op, 1))
5430 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5431 && INTVAL (XEXP (op, 1)) >= 0
5432 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5433 && byte == subreg_lowpart_offset (outermode, innermode))
5435 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5436 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5437 (WORDS_BIG_ENDIAN
5438 ? byte - shifted_bytes
5439 : byte + shifted_bytes));
5442 return NULL_RTX;
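/* A minimal usage sketch (assuming the usual QImode/SImode sizes):
   extracting the low byte of an SImode constant, e.g.

     simplify_subreg (QImode, GEN_INT (0x1234), SImode,
                      subreg_lowpart_offset (QImode, SImode));

   goes through simplify_immed_subreg above and yields (const_int 0x34);
   subreg_lowpart_offset supplies the endian-correct byte offset.  */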
5445 /* Make a SUBREG operation or equivalent if it folds. */
5448 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5449 enum machine_mode innermode, unsigned int byte)
5451 rtx newx;
5453 newx = simplify_subreg (outermode, op, innermode, byte);
5454 if (newx)
5455 return newx;
5457 if (GET_CODE (op) == SUBREG
5458 || GET_CODE (op) == CONCAT
5459 || GET_MODE (op) == VOIDmode)
5460 return NULL_RTX;
5462 if (validate_subreg (outermode, innermode, op, byte))
5463 return gen_rtx_SUBREG (outermode, op, byte);
5465 return NULL_RTX;
5468 /* Simplify X, an rtx expression.
5470 Return the simplified expression or NULL if no simplifications
5471 were possible.
5473 This is the preferred entry point into the simplification routines;
5474 however, we still allow passes to call the more specific routines.
5476 Right now GCC has three (yes, three) major bodies of RTL simplification
5477 code that need to be unified.
5479 1. fold_rtx in cse.c. This code uses various CSE specific
5480 information to aid in RTL simplification.
5482 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5483 it uses combine specific information to aid in RTL
5484 simplification.
5486 3. The routines in this file.
5489 Long term we want to only have one body of simplification code; to
5490 get to that state I recommend the following steps:
5492 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5493 which are not pass dependent state into these routines.
5495 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5496 use this routine whenever possible.
5498 3. Allow for pass dependent state to be provided to these
5499 routines and add simplifications based on the pass dependent
5500 state. Remove code from cse.c & combine.c that becomes
5501 redundant/dead.
5503 It will take time, but ultimately the compiler will be easier to
5504 maintain and improve.  It's totally silly that when we add a
5505 simplification it needs to be added to 4 places (3 for RTL
5506 simplification and 1 for tree simplification). */
5509 simplify_rtx (const_rtx x)
5511 const enum rtx_code code = GET_CODE (x);
5512 const enum machine_mode mode = GET_MODE (x);
5514 switch (GET_RTX_CLASS (code))
5516 case RTX_UNARY:
5517 return simplify_unary_operation (code, mode,
5518 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5519 case RTX_COMM_ARITH:
5520 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5521 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5523 /* Fall through.... */
5525 case RTX_BIN_ARITH:
5526 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5528 case RTX_TERNARY:
5529 case RTX_BITFIELD_OPS:
5530 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5531 XEXP (x, 0), XEXP (x, 1),
5532 XEXP (x, 2));
5534 case RTX_COMPARE:
5535 case RTX_COMM_COMPARE:
5536 return simplify_relational_operation (code, mode,
5537 ((GET_MODE (XEXP (x, 0))
5538 != VOIDmode)
5539 ? GET_MODE (XEXP (x, 0))
5540 : GET_MODE (XEXP (x, 1))),
5541 XEXP (x, 0),
5542 XEXP (x, 1));
5544 case RTX_EXTRA:
5545 if (code == SUBREG)
5546 return simplify_subreg (mode, SUBREG_REG (x),
5547 GET_MODE (SUBREG_REG (x)),
5548 SUBREG_BYTE (x));
5549 break;
5551 case RTX_OBJ:
5552 if (code == LO_SUM)
5554 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5555 if (GET_CODE (XEXP (x, 0)) == HIGH
5556 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5557 return XEXP (x, 1);
5559 break;
5561 default:
5562 break;
5564 return NULL;
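/* A minimal usage sketch of the generic entry point (SImode assumed):

     simplify_rtx (gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3)));

   dispatches on the RTX class, reaches simplify_binary_operation and
   returns (const_int 5); an expression with nothing to fold returns
   NULL.  */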