gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
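/* E.g. a low word whose sign bit is set extends to a high word of -1
   (all ones); any other low word extends to a high word of 0.  */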
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
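/* For instance, in SImode only the value 0x80000000 passes this test;
   for a mode wider than HOST_WIDE_INT the bit must sit in
   CONST_DOUBLE_HIGH with CONST_DOUBLE_LOW equal to zero.  */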
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
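/* E.g. (plus (const_int 4) (reg X)) does not fold further, but it is
   reordered to (plus (reg X) (const_int 4)) before the new rtx is built.  */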
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 if (GET_MODE (x) == BLKmode)
162 return x;
164 addr = XEXP (x, 0);
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
198 else
199 return c;
202 return x;
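/* For example, a (mem (symbol_ref ...)) whose symbol addresses a pooled
   DFmode 1.0 comes back as that CONST_DOUBLE, so callers can fold with
   the constant value rather than the memory reference.  */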
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
209 rtx
210 delegitimize_mem_from_attrs (rtx x)
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
271 newx = DECL_RTL (decl);
273 if (MEM_P (newx))
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
302 return x;
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
308 rtx
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
312 rtx tem;
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
318 return gen_rtx_fmt_e (code, mode, op);
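/* E.g. (neg (neg (reg X))) simplifies straight to (reg X), while a plain
   (neg (reg X)) just gets a fresh NEG rtx built around it.  */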
321 /* Likewise for ternary operations. */
323 rtx
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
327 rtx tem;
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
340 rtx
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
344 rtx tem;
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
353 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
354 and simplify the result. If FN is non-NULL, call this callback on each
355 X, if it returns non-NULL, replace X with its return value and simplify the
356 result. */
358 rtx
359 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
360 rtx (*fn) (rtx, const_rtx, void *), void *data)
362 enum rtx_code code = GET_CODE (x);
363 enum machine_mode mode = GET_MODE (x);
364 enum machine_mode op_mode;
365 const char *fmt;
366 rtx op0, op1, op2, newx, op;
367 rtvec vec, newvec;
368 int i, j;
370 if (__builtin_expect (fn != NULL, 0))
372 newx = fn (x, old_rtx, data);
373 if (newx)
374 return newx;
376 else if (rtx_equal_p (x, old_rtx))
377 return copy_rtx ((rtx) data);
379 switch (GET_RTX_CLASS (code))
381 case RTX_UNARY:
382 op0 = XEXP (x, 0);
383 op_mode = GET_MODE (op0);
384 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
385 if (op0 == XEXP (x, 0))
386 return x;
387 return simplify_gen_unary (code, mode, op0, op_mode);
389 case RTX_BIN_ARITH:
390 case RTX_COMM_ARITH:
391 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
392 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
393 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
394 return x;
395 return simplify_gen_binary (code, mode, op0, op1);
397 case RTX_COMPARE:
398 case RTX_COMM_COMPARE:
399 op0 = XEXP (x, 0);
400 op1 = XEXP (x, 1);
401 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
402 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
403 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
404 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
405 return x;
406 return simplify_gen_relational (code, mode, op_mode, op0, op1);
408 case RTX_TERNARY:
409 case RTX_BITFIELD_OPS:
410 op0 = XEXP (x, 0);
411 op_mode = GET_MODE (op0);
412 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
413 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
414 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
415 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
416 return x;
417 if (op_mode == VOIDmode)
418 op_mode = GET_MODE (op0);
419 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
421 case RTX_EXTRA:
422 if (code == SUBREG)
424 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
425 if (op0 == SUBREG_REG (x))
426 return x;
427 op0 = simplify_gen_subreg (GET_MODE (x), op0,
428 GET_MODE (SUBREG_REG (x)),
429 SUBREG_BYTE (x));
430 return op0 ? op0 : x;
432 break;
434 case RTX_OBJ:
435 if (code == MEM)
437 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
438 if (op0 == XEXP (x, 0))
439 return x;
440 return replace_equiv_address_nv (x, op0);
442 else if (code == LO_SUM)
444 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
445 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 /* (lo_sum (high x) x) -> x */
448 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
449 return op1;
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return gen_rtx_LO_SUM (mode, op0, op1);
455 break;
457 default:
458 break;
461 newx = x;
462 fmt = GET_RTX_FORMAT (code);
463 for (i = 0; fmt[i]; i++)
464 switch (fmt[i])
466 case 'E':
467 vec = XVEC (x, i);
468 newvec = XVEC (newx, i);
469 for (j = 0; j < GET_NUM_ELEM (vec); j++)
471 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
472 old_rtx, fn, data);
473 if (op != RTVEC_ELT (vec, j))
475 if (newvec == vec)
477 newvec = shallow_copy_rtvec (vec);
478 if (x == newx)
479 newx = shallow_copy_rtx (x);
480 XVEC (newx, i) = newvec;
482 RTVEC_ELT (newvec, j) = op;
485 break;
487 case 'e':
488 if (XEXP (x, i))
490 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
491 if (op != XEXP (x, i))
493 if (x == newx)
494 newx = shallow_copy_rtx (x);
495 XEXP (newx, i) = op;
498 break;
500 return newx;
503 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
504 resulting RTX. Return a new RTX which is as simplified as possible. */
506 rtx
507 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
509 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
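/* E.g. replacing (reg X) with (const_int 3) in (plus (reg X) (const_int 4))
   yields (const_int 7), since every rebuilt subexpression goes back through
   the simplify_gen_* routines above.  */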
512 /* Try to simplify a unary operation CODE whose output mode is to be
513 MODE with input operand OP whose mode was originally OP_MODE.
514 Return zero if no simplification can be made. */
515 rtx
516 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
517 rtx op, enum machine_mode op_mode)
519 rtx trueop, tem;
521 trueop = avoid_constant_pool_reference (op);
523 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
524 if (tem)
525 return tem;
527 return simplify_unary_operation_1 (code, mode, op);
530 /* Perform some simplifications we can do even if the operands
531 aren't constant. */
532 static rtx
533 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
535 enum rtx_code reversed;
536 rtx temp;
538 switch (code)
540 case NOT:
541 /* (not (not X)) == X. */
542 if (GET_CODE (op) == NOT)
543 return XEXP (op, 0);
545 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
546 comparison is all ones. */
547 if (COMPARISON_P (op)
548 && (mode == BImode || STORE_FLAG_VALUE == -1)
549 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
550 return simplify_gen_relational (reversed, mode, VOIDmode,
551 XEXP (op, 0), XEXP (op, 1));
553 /* (not (plus X -1)) can become (neg X). */
554 if (GET_CODE (op) == PLUS
555 && XEXP (op, 1) == constm1_rtx)
556 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
558 /* Similarly, (not (neg X)) is (plus X -1). */
559 if (GET_CODE (op) == NEG)
560 return plus_constant (XEXP (op, 0), -1);
562 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
563 if (GET_CODE (op) == XOR
564 && CONST_INT_P (XEXP (op, 1))
565 && (temp = simplify_unary_operation (NOT, mode,
566 XEXP (op, 1), mode)) != 0)
567 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
569 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
570 if (GET_CODE (op) == PLUS
571 && CONST_INT_P (XEXP (op, 1))
572 && mode_signbit_p (mode, XEXP (op, 1))
573 && (temp = simplify_unary_operation (NOT, mode,
574 XEXP (op, 1), mode)) != 0)
575 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
578 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
579 operands other than 1, but that is not valid. We could do a
580 similar simplification for (not (lshiftrt C X)) where C is
581 just the sign bit, but this doesn't seem common enough to
582 bother with. */
583 if (GET_CODE (op) == ASHIFT
584 && XEXP (op, 0) == const1_rtx)
586 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
587 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
590 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
591 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
592 so we can perform the above simplification. */
594 if (STORE_FLAG_VALUE == -1
595 && GET_CODE (op) == ASHIFTRT
596 && CONST_INT_P (XEXP (op, 1))
597 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
598 return simplify_gen_relational (GE, mode, VOIDmode,
599 XEXP (op, 0), const0_rtx);
602 if (GET_CODE (op) == SUBREG
603 && subreg_lowpart_p (op)
604 && (GET_MODE_SIZE (GET_MODE (op))
605 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
606 && GET_CODE (SUBREG_REG (op)) == ASHIFT
607 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
609 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
610 rtx x;
612 x = gen_rtx_ROTATE (inner_mode,
613 simplify_gen_unary (NOT, inner_mode, const1_rtx,
614 inner_mode),
615 XEXP (SUBREG_REG (op), 1));
616 return rtl_hooks.gen_lowpart_no_emit (mode, x);
619 /* Apply De Morgan's laws to reduce number of patterns for machines
620 with negating logical insns (and-not, nand, etc.). If result has
621 only one NOT, put it first, since that is how the patterns are
622 coded. */
624 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
626 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
627 enum machine_mode op_mode;
629 op_mode = GET_MODE (in1);
630 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
632 op_mode = GET_MODE (in2);
633 if (op_mode == VOIDmode)
634 op_mode = mode;
635 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
637 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
639 rtx tem = in2;
640 in2 = in1; in1 = tem;
643 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
644 mode, in1, in2);
646 break;
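/* E.g. the De Morgan rewrite above turns (not (and X Y)) into
   (ior (not X) (not Y)), the form that negating logical patterns expect.  */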
648 case NEG:
649 /* (neg (neg X)) == X. */
650 if (GET_CODE (op) == NEG)
651 return XEXP (op, 0);
653 /* (neg (plus X 1)) can become (not X). */
654 if (GET_CODE (op) == PLUS
655 && XEXP (op, 1) == const1_rtx)
656 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
658 /* Similarly, (neg (not X)) is (plus X 1). */
659 if (GET_CODE (op) == NOT)
660 return plus_constant (XEXP (op, 0), 1);
662 /* (neg (minus X Y)) can become (minus Y X). This transformation
663 isn't safe for modes with signed zeros, since if X and Y are
664 both +0, (minus Y X) is the same as (minus X Y). If the
665 rounding mode is towards +infinity (or -infinity) then the two
666 expressions will be rounded differently. */
667 if (GET_CODE (op) == MINUS
668 && !HONOR_SIGNED_ZEROS (mode)
669 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
670 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
672 if (GET_CODE (op) == PLUS
673 && !HONOR_SIGNED_ZEROS (mode)
674 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
676 /* (neg (plus A C)) is simplified to (minus -C A). */
677 if (CONST_INT_P (XEXP (op, 1))
678 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
680 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
681 if (temp)
682 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
685 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
686 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
687 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
690 /* (neg (mult A B)) becomes (mult (neg A) B).
691 This works even for floating-point values. */
692 if (GET_CODE (op) == MULT
693 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
695 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
696 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
699 /* NEG commutes with ASHIFT since it is multiplication. Only do
700 this if we can then eliminate the NEG (e.g., if the operand
701 is a constant). */
702 if (GET_CODE (op) == ASHIFT)
704 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
705 if (temp)
706 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
709 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
710 C is equal to the width of MODE minus 1. */
711 if (GET_CODE (op) == ASHIFTRT
712 && CONST_INT_P (XEXP (op, 1))
713 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
714 return simplify_gen_binary (LSHIFTRT, mode,
715 XEXP (op, 0), XEXP (op, 1));
717 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
718 C is equal to the width of MODE minus 1. */
719 if (GET_CODE (op) == LSHIFTRT
720 && CONST_INT_P (XEXP (op, 1))
721 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
722 return simplify_gen_binary (ASHIFTRT, mode,
723 XEXP (op, 0), XEXP (op, 1));
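/* Concretely, for a 32-bit X, (ashiftrt X 31) is 0 or -1 while
   (lshiftrt X 31) is 0 or 1, and negation maps each result onto the
   other, which is why the two rewrites above are exact.  */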
725 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
726 if (GET_CODE (op) == XOR
727 && XEXP (op, 1) == const1_rtx
728 && nonzero_bits (XEXP (op, 0), mode) == 1)
729 return plus_constant (XEXP (op, 0), -1);
731 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
732 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
733 if (GET_CODE (op) == LT
734 && XEXP (op, 1) == const0_rtx
735 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
737 enum machine_mode inner = GET_MODE (XEXP (op, 0));
738 int isize = GET_MODE_BITSIZE (inner);
739 if (STORE_FLAG_VALUE == 1)
741 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
742 GEN_INT (isize - 1));
743 if (mode == inner)
744 return temp;
745 if (GET_MODE_BITSIZE (mode) > isize)
746 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
747 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
749 else if (STORE_FLAG_VALUE == -1)
751 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
752 GEN_INT (isize - 1));
753 if (mode == inner)
754 return temp;
755 if (GET_MODE_BITSIZE (mode) > isize)
756 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
757 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
760 break;
762 case TRUNCATE:
763 /* We can't handle truncation to a partial integer mode here
764 because we don't know the real bitsize of the partial
765 integer mode. */
766 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
767 break;
769 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
770 if ((GET_CODE (op) == SIGN_EXTEND
771 || GET_CODE (op) == ZERO_EXTEND)
772 && GET_MODE (XEXP (op, 0)) == mode)
773 return XEXP (op, 0);
775 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
776 (OP:SI foo:SI) if OP is NEG or ABS. */
777 if ((GET_CODE (op) == ABS
778 || GET_CODE (op) == NEG)
779 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
780 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
781 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
782 return simplify_gen_unary (GET_CODE (op), mode,
783 XEXP (XEXP (op, 0), 0), mode);
785 /* (truncate:A (subreg:B (truncate:C X) 0)) is
786 (truncate:A X). */
787 if (GET_CODE (op) == SUBREG
788 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
789 && subreg_lowpart_p (op))
790 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
791 GET_MODE (XEXP (SUBREG_REG (op), 0)));
793 /* If we know that the value is already truncated, we can
794 replace the TRUNCATE with a SUBREG. Note that this is also
795 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
796 modes we just have to apply a different definition for
797 truncation. But don't do this for an (LSHIFTRT (MULT ...))
798 since this will cause problems with the umulXi3_highpart
799 patterns. */
800 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
801 GET_MODE_BITSIZE (GET_MODE (op)))
802 ? (num_sign_bit_copies (op, GET_MODE (op))
803 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
804 - GET_MODE_BITSIZE (mode)))
805 : truncated_to_mode (mode, op))
806 && ! (GET_CODE (op) == LSHIFTRT
807 && GET_CODE (XEXP (op, 0)) == MULT))
808 return rtl_hooks.gen_lowpart_no_emit (mode, op);
810 /* A truncate of a comparison can be replaced with a subreg if
811 STORE_FLAG_VALUE permits. This is like the previous test,
812 but it works even if the comparison is done in a mode larger
813 than HOST_BITS_PER_WIDE_INT. */
814 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
815 && COMPARISON_P (op)
816 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
817 return rtl_hooks.gen_lowpart_no_emit (mode, op);
818 break;
820 case FLOAT_TRUNCATE:
821 if (DECIMAL_FLOAT_MODE_P (mode))
822 break;
824 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
825 if (GET_CODE (op) == FLOAT_EXTEND
826 && GET_MODE (XEXP (op, 0)) == mode)
827 return XEXP (op, 0);
829 /* (float_truncate:SF (float_truncate:DF foo:XF))
830 = (float_truncate:SF foo:XF).
831 This may eliminate double rounding, so it is unsafe.
833 (float_truncate:SF (float_extend:XF foo:DF))
834 = (float_truncate:SF foo:DF).
836 (float_truncate:DF (float_extend:XF foo:SF))
837 = (float_extend:DF foo:SF). */
838 if ((GET_CODE (op) == FLOAT_TRUNCATE
839 && flag_unsafe_math_optimizations)
840 || GET_CODE (op) == FLOAT_EXTEND)
841 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
842 0)))
843 > GET_MODE_SIZE (mode)
844 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
845 mode,
846 XEXP (op, 0), mode);
848 /* (float_truncate (float x)) is (float x) */
849 if (GET_CODE (op) == FLOAT
850 && (flag_unsafe_math_optimizations
851 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
852 && ((unsigned)significand_size (GET_MODE (op))
853 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
854 - num_sign_bit_copies (XEXP (op, 0),
855 GET_MODE (XEXP (op, 0))))))))
856 return simplify_gen_unary (FLOAT, mode,
857 XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
860 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
861 (OP:SF foo:SF) if OP is NEG or ABS. */
862 if ((GET_CODE (op) == ABS
863 || GET_CODE (op) == NEG)
864 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
865 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
866 return simplify_gen_unary (GET_CODE (op), mode,
867 XEXP (XEXP (op, 0), 0), mode);
869 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
870 is (float_truncate:SF x). */
871 if (GET_CODE (op) == SUBREG
872 && subreg_lowpart_p (op)
873 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
874 return SUBREG_REG (op);
875 break;
877 case FLOAT_EXTEND:
878 if (DECIMAL_FLOAT_MODE_P (mode))
879 break;
881 /* (float_extend (float_extend x)) is (float_extend x)
883 (float_extend (float x)) is (float x) assuming that double
884 rounding can't happen.
885 */
886 if (GET_CODE (op) == FLOAT_EXTEND
887 || (GET_CODE (op) == FLOAT
888 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
889 && ((unsigned)significand_size (GET_MODE (op))
890 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
891 - num_sign_bit_copies (XEXP (op, 0),
892 GET_MODE (XEXP (op, 0)))))))
893 return simplify_gen_unary (GET_CODE (op), mode,
894 XEXP (op, 0),
895 GET_MODE (XEXP (op, 0)));
897 break;
899 case ABS:
900 /* (abs (neg <foo>)) -> (abs <foo>) */
901 if (GET_CODE (op) == NEG)
902 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
903 GET_MODE (XEXP (op, 0)));
905 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
906 do nothing. */
907 if (GET_MODE (op) == VOIDmode)
908 break;
910 /* If operand is something known to be positive, ignore the ABS. */
911 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
912 || ((GET_MODE_BITSIZE (GET_MODE (op))
913 <= HOST_BITS_PER_WIDE_INT)
914 && ((nonzero_bits (op, GET_MODE (op))
915 & ((HOST_WIDE_INT) 1
916 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
917 == 0)))
918 return op;
920 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
921 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
922 return gen_rtx_NEG (mode, op);
924 break;
926 case FFS:
927 /* (ffs (*_extend <X>)) = (ffs <X>) */
928 if (GET_CODE (op) == SIGN_EXTEND
929 || GET_CODE (op) == ZERO_EXTEND)
930 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
931 GET_MODE (XEXP (op, 0)));
932 break;
934 case POPCOUNT:
935 switch (GET_CODE (op))
937 case BSWAP:
938 case ZERO_EXTEND:
939 /* (popcount (zero_extend <X>)) = (popcount <X>) */
940 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
941 GET_MODE (XEXP (op, 0)));
943 case ROTATE:
944 case ROTATERT:
945 /* Rotations don't affect popcount. */
946 if (!side_effects_p (XEXP (op, 1)))
947 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
948 GET_MODE (XEXP (op, 0)));
949 break;
951 default:
952 break;
954 break;
956 case PARITY:
957 switch (GET_CODE (op))
959 case NOT:
960 case BSWAP:
961 case ZERO_EXTEND:
962 case SIGN_EXTEND:
963 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
964 GET_MODE (XEXP (op, 0)));
966 case ROTATE:
967 case ROTATERT:
968 /* Rotations don't affect parity. */
969 if (!side_effects_p (XEXP (op, 1)))
970 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
971 GET_MODE (XEXP (op, 0)));
972 break;
974 default:
975 break;
977 break;
979 case BSWAP:
980 /* (bswap (bswap x)) -> x. */
981 if (GET_CODE (op) == BSWAP)
982 return XEXP (op, 0);
983 break;
985 case FLOAT:
986 /* (float (sign_extend <X>)) = (float <X>). */
987 if (GET_CODE (op) == SIGN_EXTEND)
988 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
989 GET_MODE (XEXP (op, 0)));
990 break;
992 case SIGN_EXTEND:
993 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
994 becomes just the MINUS if its mode is MODE. This allows
995 folding switch statements on machines using casesi (such as
996 the VAX). */
997 if (GET_CODE (op) == TRUNCATE
998 && GET_MODE (XEXP (op, 0)) == mode
999 && GET_CODE (XEXP (op, 0)) == MINUS
1000 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1001 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1002 return XEXP (op, 0);
1004 /* Check for a sign extension of a subreg of a promoted
1005 variable, where the promotion is sign-extended, and the
1006 target mode is the same as the variable's promotion. */
1007 if (GET_CODE (op) == SUBREG
1008 && SUBREG_PROMOTED_VAR_P (op)
1009 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1010 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1011 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1013 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1014 /* As we do not know which address space the pointer is referring to,
1015 we can do this only if the target does not support different pointer
1016 or address modes depending on the address space. */
1017 if (target_default_pointer_address_modes_p ()
1018 && ! POINTERS_EXTEND_UNSIGNED
1019 && mode == Pmode && GET_MODE (op) == ptr_mode
1020 && (CONSTANT_P (op)
1021 || (GET_CODE (op) == SUBREG
1022 && REG_P (SUBREG_REG (op))
1023 && REG_POINTER (SUBREG_REG (op))
1024 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1025 return convert_memory_address (Pmode, op);
1026 #endif
1027 break;
1029 case ZERO_EXTEND:
1030 /* Check for a zero extension of a subreg of a promoted
1031 variable, where the promotion is zero-extended, and the
1032 target mode is the same as the variable's promotion. */
1033 if (GET_CODE (op) == SUBREG
1034 && SUBREG_PROMOTED_VAR_P (op)
1035 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1036 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1037 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1039 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1040 /* As we do not know which address space the pointer is referring to,
1041 we can do this only if the target does not support different pointer
1042 or address modes depending on the address space. */
1043 if (target_default_pointer_address_modes_p ()
1044 && POINTERS_EXTEND_UNSIGNED > 0
1045 && mode == Pmode && GET_MODE (op) == ptr_mode
1046 && (CONSTANT_P (op)
1047 || (GET_CODE (op) == SUBREG
1048 && REG_P (SUBREG_REG (op))
1049 && REG_POINTER (SUBREG_REG (op))
1050 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1051 return convert_memory_address (Pmode, op);
1052 #endif
1053 break;
1055 default:
1056 break;
1059 return 0;
1062 /* Try to compute the value of a unary operation CODE whose output mode is to
1063 be MODE with input operand OP whose mode was originally OP_MODE.
1064 Return zero if the value cannot be computed. */
1065 rtx
1066 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1067 rtx op, enum machine_mode op_mode)
1069 unsigned int width = GET_MODE_BITSIZE (mode);
1071 if (code == VEC_DUPLICATE)
1073 gcc_assert (VECTOR_MODE_P (mode));
1074 if (GET_MODE (op) != VOIDmode)
1076 if (!VECTOR_MODE_P (GET_MODE (op)))
1077 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1078 else
1079 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1080 (GET_MODE (op)));
1082 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1083 || GET_CODE (op) == CONST_VECTOR)
1085 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1086 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1087 rtvec v = rtvec_alloc (n_elts);
1088 unsigned int i;
1090 if (GET_CODE (op) != CONST_VECTOR)
1091 for (i = 0; i < n_elts; i++)
1092 RTVEC_ELT (v, i) = op;
1093 else
1095 enum machine_mode inmode = GET_MODE (op);
1096 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1097 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1099 gcc_assert (in_n_elts < n_elts);
1100 gcc_assert ((n_elts % in_n_elts) == 0);
1101 for (i = 0; i < n_elts; i++)
1102 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1104 return gen_rtx_CONST_VECTOR (mode, v);
1108 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1110 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1111 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1112 enum machine_mode opmode = GET_MODE (op);
1113 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1114 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1115 rtvec v = rtvec_alloc (n_elts);
1116 unsigned int i;
1118 gcc_assert (op_n_elts == n_elts);
1119 for (i = 0; i < n_elts; i++)
1121 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1122 CONST_VECTOR_ELT (op, i),
1123 GET_MODE_INNER (opmode));
1124 if (!x)
1125 return 0;
1126 RTVEC_ELT (v, i) = x;
1128 return gen_rtx_CONST_VECTOR (mode, v);
1131 /* The order of these tests is critical so that, for example, we don't
1132 check the wrong mode (input vs. output) for a conversion operation,
1133 such as FIX. At some point, this should be simplified. */
1135 if (code == FLOAT && GET_MODE (op) == VOIDmode
1136 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1138 HOST_WIDE_INT hv, lv;
1139 REAL_VALUE_TYPE d;
1141 if (CONST_INT_P (op))
1142 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1143 else
1144 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1146 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1147 d = real_value_truncate (mode, d);
1148 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1150 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1151 && (GET_CODE (op) == CONST_DOUBLE
1152 || CONST_INT_P (op)))
1154 HOST_WIDE_INT hv, lv;
1155 REAL_VALUE_TYPE d;
1157 if (CONST_INT_P (op))
1158 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1159 else
1160 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1162 if (op_mode == VOIDmode)
1164 /* We don't know how to interpret negative-looking numbers in
1165 this case, so don't try to fold those. */
1166 if (hv < 0)
1167 return 0;
1169 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1170 ;
1171 else
1172 hv = 0, lv &= GET_MODE_MASK (op_mode);
1174 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1175 d = real_value_truncate (mode, d);
1176 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1179 if (CONST_INT_P (op)
1180 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1182 HOST_WIDE_INT arg0 = INTVAL (op);
1183 HOST_WIDE_INT val;
1185 switch (code)
1187 case NOT:
1188 val = ~ arg0;
1189 break;
1191 case NEG:
1192 val = - arg0;
1193 break;
1195 case ABS:
1196 val = (arg0 >= 0 ? arg0 : - arg0);
1197 break;
1199 case FFS:
1200 /* Don't use ffs here. Instead, get low order bit and then its
1201 number. If arg0 is zero, this will return 0, as desired. */
1202 arg0 &= GET_MODE_MASK (mode);
1203 val = exact_log2 (arg0 & (- arg0)) + 1;
1204 break;
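/* E.g. arg0 == 20 (10100 in binary): arg0 & -arg0 isolates bit 2,
   exact_log2 returns 2, and adding 1 gives ffs == 3.  */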
1206 case CLZ:
1207 arg0 &= GET_MODE_MASK (mode);
1208 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1209 ;
1210 else
1211 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1212 break;
1214 case CTZ:
1215 arg0 &= GET_MODE_MASK (mode);
1216 if (arg0 == 0)
1218 /* Even if the value at zero is undefined, we have to come
1219 up with some replacement. Seems good enough. */
1220 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1221 val = GET_MODE_BITSIZE (mode);
1223 else
1224 val = exact_log2 (arg0 & -arg0);
1225 break;
1227 case POPCOUNT:
1228 arg0 &= GET_MODE_MASK (mode);
1229 val = 0;
1230 while (arg0)
1231 val++, arg0 &= arg0 - 1;
1232 break;
1234 case PARITY:
1235 arg0 &= GET_MODE_MASK (mode);
1236 val = 0;
1237 while (arg0)
1238 val++, arg0 &= arg0 - 1;
1239 val &= 1;
1240 break;
1242 case BSWAP:
1244 unsigned int s;
1246 val = 0;
1247 for (s = 0; s < width; s += 8)
1249 unsigned int d = width - s - 8;
1250 unsigned HOST_WIDE_INT byte;
1251 byte = (arg0 >> s) & 0xff;
1252 val |= byte << d;
1255 break;
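/* E.g. for width 32 and arg0 == 0x12345678 the loop above assembles
   val == 0x78563412, moving each byte from position s to width - s - 8.  */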
1257 case TRUNCATE:
1258 val = arg0;
1259 break;
1261 case ZERO_EXTEND:
1262 /* When zero-extending a CONST_INT, we need to know its
1263 original mode. */
1264 gcc_assert (op_mode != VOIDmode);
1265 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1267 /* If we were really extending the mode,
1268 we would have to distinguish between zero-extension
1269 and sign-extension. */
1270 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1271 val = arg0;
1273 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1274 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1275 else
1276 return 0;
1277 break;
1279 case SIGN_EXTEND:
1280 if (op_mode == VOIDmode)
1281 op_mode = mode;
1282 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1284 /* If we were really extending the mode,
1285 we would have to distinguish between zero-extension
1286 and sign-extension. */
1287 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1288 val = arg0;
1290 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1292 val
1293 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1294 if (val
1295 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1296 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1298 else
1299 return 0;
1300 break;
1302 case SQRT:
1303 case FLOAT_EXTEND:
1304 case FLOAT_TRUNCATE:
1305 case SS_TRUNCATE:
1306 case US_TRUNCATE:
1307 case SS_NEG:
1308 case US_NEG:
1309 case SS_ABS:
1310 return 0;
1312 default:
1313 gcc_unreachable ();
1316 return gen_int_mode (val, mode);
1319 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1320 for a DImode operation on a CONST_INT. */
1321 else if (GET_MODE (op) == VOIDmode
1322 && width <= HOST_BITS_PER_WIDE_INT * 2
1323 && (GET_CODE (op) == CONST_DOUBLE
1324 || CONST_INT_P (op)))
1326 unsigned HOST_WIDE_INT l1, lv;
1327 HOST_WIDE_INT h1, hv;
1329 if (GET_CODE (op) == CONST_DOUBLE)
1330 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1331 else
1332 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1334 switch (code)
1336 case NOT:
1337 lv = ~ l1;
1338 hv = ~ h1;
1339 break;
1341 case NEG:
1342 neg_double (l1, h1, &lv, &hv);
1343 break;
1345 case ABS:
1346 if (h1 < 0)
1347 neg_double (l1, h1, &lv, &hv);
1348 else
1349 lv = l1, hv = h1;
1350 break;
1352 case FFS:
1353 hv = 0;
1354 if (l1 == 0)
1356 if (h1 == 0)
1357 lv = 0;
1358 else
1359 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1361 else
1362 lv = exact_log2 (l1 & -l1) + 1;
1363 break;
1365 case CLZ:
1366 hv = 0;
1367 if (h1 != 0)
1368 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1369 - HOST_BITS_PER_WIDE_INT;
1370 else if (l1 != 0)
1371 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1372 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1373 lv = GET_MODE_BITSIZE (mode);
1374 break;
1376 case CTZ:
1377 hv = 0;
1378 if (l1 != 0)
1379 lv = exact_log2 (l1 & -l1);
1380 else if (h1 != 0)
1381 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1382 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1383 lv = GET_MODE_BITSIZE (mode);
1384 break;
1386 case POPCOUNT:
1387 hv = 0;
1388 lv = 0;
1389 while (l1)
1390 lv++, l1 &= l1 - 1;
1391 while (h1)
1392 lv++, h1 &= h1 - 1;
1393 break;
1395 case PARITY:
1396 hv = 0;
1397 lv = 0;
1398 while (l1)
1399 lv++, l1 &= l1 - 1;
1400 while (h1)
1401 lv++, h1 &= h1 - 1;
1402 lv &= 1;
1403 break;
1405 case BSWAP:
1407 unsigned int s;
1409 hv = 0;
1410 lv = 0;
1411 for (s = 0; s < width; s += 8)
1413 unsigned int d = width - s - 8;
1414 unsigned HOST_WIDE_INT byte;
1416 if (s < HOST_BITS_PER_WIDE_INT)
1417 byte = (l1 >> s) & 0xff;
1418 else
1419 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1421 if (d < HOST_BITS_PER_WIDE_INT)
1422 lv |= byte << d;
1423 else
1424 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1427 break;
1429 case TRUNCATE:
1430 /* This is just a change-of-mode, so do nothing. */
1431 lv = l1, hv = h1;
1432 break;
1434 case ZERO_EXTEND:
1435 gcc_assert (op_mode != VOIDmode);
1437 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1438 return 0;
1440 hv = 0;
1441 lv = l1 & GET_MODE_MASK (op_mode);
1442 break;
1444 case SIGN_EXTEND:
1445 if (op_mode == VOIDmode
1446 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1447 return 0;
1448 else
1450 lv = l1 & GET_MODE_MASK (op_mode);
1451 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1452 && (lv & ((HOST_WIDE_INT) 1
1453 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1454 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1456 hv = HWI_SIGN_EXTEND (lv);
1458 break;
1460 case SQRT:
1461 return 0;
1463 default:
1464 return 0;
1467 return immed_double_const (lv, hv, mode);
1470 else if (GET_CODE (op) == CONST_DOUBLE
1471 && SCALAR_FLOAT_MODE_P (mode))
1473 REAL_VALUE_TYPE d, t;
1474 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1476 switch (code)
1478 case SQRT:
1479 if (HONOR_SNANS (mode) && real_isnan (&d))
1480 return 0;
1481 real_sqrt (&t, mode, &d);
1482 d = t;
1483 break;
1484 case ABS:
1485 d = REAL_VALUE_ABS (d);
1486 break;
1487 case NEG:
1488 d = REAL_VALUE_NEGATE (d);
1489 break;
1490 case FLOAT_TRUNCATE:
1491 d = real_value_truncate (mode, d);
1492 break;
1493 case FLOAT_EXTEND:
1494 /* All this does is change the mode. */
1495 break;
1496 case FIX:
1497 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1498 break;
1499 case NOT:
1501 long tmp[4];
1502 int i;
1504 real_to_target (tmp, &d, GET_MODE (op));
1505 for (i = 0; i < 4; i++)
1506 tmp[i] = ~tmp[i];
1507 real_from_target (&d, tmp, mode);
1508 break;
1510 default:
1511 gcc_unreachable ();
1513 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1516 else if (GET_CODE (op) == CONST_DOUBLE
1517 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1518 && GET_MODE_CLASS (mode) == MODE_INT
1519 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1521 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1522 operators are intentionally left unspecified (to ease implementation
1523 by target backends), for consistency, this routine implements the
1524 same semantics for constant folding as used by the middle-end. */
1526 /* This was formerly used only for non-IEEE float.
1527 eggert@twinsun.com says it is safe for IEEE also. */
1528 HOST_WIDE_INT xh, xl, th, tl;
1529 REAL_VALUE_TYPE x, t;
1530 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1531 switch (code)
1533 case FIX:
1534 if (REAL_VALUE_ISNAN (x))
1535 return const0_rtx;
1537 /* Test against the signed upper bound. */
1538 if (width > HOST_BITS_PER_WIDE_INT)
1540 th = ((unsigned HOST_WIDE_INT) 1
1541 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1542 tl = -1;
1544 else
1546 th = 0;
1547 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1549 real_from_integer (&t, VOIDmode, tl, th, 0);
1550 if (REAL_VALUES_LESS (t, x))
1552 xh = th;
1553 xl = tl;
1554 break;
1557 /* Test against the signed lower bound. */
1558 if (width > HOST_BITS_PER_WIDE_INT)
1560 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1561 tl = 0;
1563 else
1565 th = -1;
1566 tl = (HOST_WIDE_INT) -1 << (width - 1);
1568 real_from_integer (&t, VOIDmode, tl, th, 0);
1569 if (REAL_VALUES_LESS (x, t))
1571 xh = th;
1572 xl = tl;
1573 break;
1575 REAL_VALUE_TO_INT (&xl, &xh, x);
1576 break;
1578 case UNSIGNED_FIX:
1579 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1580 return const0_rtx;
1582 /* Test against the unsigned upper bound. */
1583 if (width == 2*HOST_BITS_PER_WIDE_INT)
1585 th = -1;
1586 tl = -1;
1588 else if (width >= HOST_BITS_PER_WIDE_INT)
1590 th = ((unsigned HOST_WIDE_INT) 1
1591 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1592 tl = -1;
1594 else
1596 th = 0;
1597 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1599 real_from_integer (&t, VOIDmode, tl, th, 1);
1600 if (REAL_VALUES_LESS (t, x))
1602 xh = th;
1603 xl = tl;
1604 break;
1607 REAL_VALUE_TO_INT (&xl, &xh, x);
1608 break;
1610 default:
1611 gcc_unreachable ();
1613 return immed_double_const (xl, xh, mode);
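/* E.g. folding (fix:SI (const_double:DF 3.0e10)) clamps against the signed
   upper bound and yields 0x7fffffff, while a NaN operand folds to
   const0_rtx, matching the middle-end's constant-folding semantics.  */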
1616 return NULL_RTX;
1619 /* Subroutine of simplify_binary_operation to simplify a commutative,
1620 associative binary operation CODE with result mode MODE, operating
1621 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1622 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1623 canonicalization is possible. */
1625 static rtx
1626 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1627 rtx op0, rtx op1)
1629 rtx tem;
1631 /* Linearize the operator to the left. */
1632 if (GET_CODE (op1) == code)
1634 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1635 if (GET_CODE (op0) == code)
1637 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1638 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1641 /* "a op (b op c)" becomes "(b op c) op a". */
1642 if (! swap_commutative_operands_p (op1, op0))
1643 return simplify_gen_binary (code, mode, op1, op0);
1645 tem = op0;
1646 op0 = op1;
1647 op1 = tem;
1650 if (GET_CODE (op0) == code)
1652 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1653 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1655 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1656 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1659 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1660 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1661 if (tem != 0)
1662 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1664 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1665 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1666 if (tem != 0)
1667 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1670 return 0;
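/* E.g. (plus (plus (reg X) (const_int 3)) (const_int 4)) reaches the
   "a op (b op c)" attempt above and folds to (plus (reg X) (const_int 7)).  */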
1674 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1675 and OP1. Return 0 if no simplification is possible.
1677 Don't use this for relational operations such as EQ or LT.
1678 Use simplify_relational_operation instead. */
1679 rtx
1680 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1681 rtx op0, rtx op1)
1683 rtx trueop0, trueop1;
1684 rtx tem;
1686 /* Relational operations don't work here. We must know the mode
1687 of the operands in order to do the comparison correctly.
1688 Assuming a full word can give incorrect results.
1689 Consider comparing 128 with -128 in QImode. */
1690 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1691 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1693 /* Make sure the constant is second. */
1694 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1695 && swap_commutative_operands_p (op0, op1))
1697 tem = op0, op0 = op1, op1 = tem;
1700 trueop0 = avoid_constant_pool_reference (op0);
1701 trueop1 = avoid_constant_pool_reference (op1);
1703 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1704 if (tem)
1705 return tem;
1706 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1709 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1710 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1711 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1712 actual constants. */
1714 static rtx
1715 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1716 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1718 rtx tem, reversed, opleft, opright;
1719 HOST_WIDE_INT val;
1720 unsigned int width = GET_MODE_BITSIZE (mode);
1722 /* Even if we can't compute a constant result,
1723 there are some cases worth simplifying. */
1725 switch (code)
1727 case PLUS:
1728 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1729 when x is NaN, infinite, or finite and nonzero. They aren't
1730 when x is -0 and the rounding mode is not towards -infinity,
1731 since (-0) + 0 is then 0. */
1732 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1733 return op0;
1735 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1736 transformations are safe even for IEEE. */
1737 if (GET_CODE (op0) == NEG)
1738 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1739 else if (GET_CODE (op1) == NEG)
1740 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1742 /* (~a) + 1 -> -a */
1743 if (INTEGRAL_MODE_P (mode)
1744 && GET_CODE (op0) == NOT
1745 && trueop1 == const1_rtx)
1746 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1748 /* Handle both-operands-constant cases. We can only add
1749 CONST_INTs to constants since the sum of relocatable symbols
1750 can't be handled by most assemblers. Don't add CONST_INT
1751 to CONST_INT since overflow won't be computed properly if wider
1752 than HOST_BITS_PER_WIDE_INT. */
1754 if ((GET_CODE (op0) == CONST
1755 || GET_CODE (op0) == SYMBOL_REF
1756 || GET_CODE (op0) == LABEL_REF)
1757 && CONST_INT_P (op1))
1758 return plus_constant (op0, INTVAL (op1));
1759 else if ((GET_CODE (op1) == CONST
1760 || GET_CODE (op1) == SYMBOL_REF
1761 || GET_CODE (op1) == LABEL_REF)
1762 && CONST_INT_P (op0))
1763 return plus_constant (op1, INTVAL (op0));
1765 /* See if this is something like X * C - X or vice versa or
1766 if the multiplication is written as a shift. If so, we can
1767 distribute and make a new multiply, shift, or maybe just
1768 have X (if C is 2 in the example above). But don't make
1769 something more expensive than we had before. */
1771 if (SCALAR_INT_MODE_P (mode))
1773 double_int coeff0, coeff1;
1774 rtx lhs = op0, rhs = op1;
1776 coeff0 = double_int_one;
1777 coeff1 = double_int_one;
1779 if (GET_CODE (lhs) == NEG)
1781 coeff0 = double_int_minus_one;
1782 lhs = XEXP (lhs, 0);
1784 else if (GET_CODE (lhs) == MULT
1785 && CONST_INT_P (XEXP (lhs, 1)))
1787 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1788 lhs = XEXP (lhs, 0);
1790 else if (GET_CODE (lhs) == ASHIFT
1791 && CONST_INT_P (XEXP (lhs, 1))
1792 && INTVAL (XEXP (lhs, 1)) >= 0
1793 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1795 coeff0 = double_int_setbit (double_int_zero,
1796 INTVAL (XEXP (lhs, 1)));
1797 lhs = XEXP (lhs, 0);
1800 if (GET_CODE (rhs) == NEG)
1802 coeff1 = double_int_minus_one;
1803 rhs = XEXP (rhs, 0);
1805 else if (GET_CODE (rhs) == MULT
1806 && CONST_INT_P (XEXP (rhs, 1)))
1808 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1809 rhs = XEXP (rhs, 0);
1811 else if (GET_CODE (rhs) == ASHIFT
1812 && CONST_INT_P (XEXP (rhs, 1))
1813 && INTVAL (XEXP (rhs, 1)) >= 0
1814 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1816 coeff1 = double_int_setbit (double_int_zero,
1817 INTVAL (XEXP (rhs, 1)));
1818 rhs = XEXP (rhs, 0);
1821 if (rtx_equal_p (lhs, rhs))
1823 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1824 rtx coeff;
1825 double_int val;
1826 bool speed = optimize_function_for_speed_p (cfun);
1828 val = double_int_add (coeff0, coeff1);
1829 coeff = immed_double_int_const (val, mode);
1831 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1832 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1833 ? tem : 0;
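/* E.g. (plus (mult (reg X) (const_int 3)) (reg X)) gives coeff0 == 3 and
   coeff1 == 1, so the rewrite above forms (mult (reg X) (const_int 4)),
   kept only when rtx_cost says it is no more expensive than the original.  */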
1837 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1838 if ((CONST_INT_P (op1)
1839 || GET_CODE (op1) == CONST_DOUBLE)
1840 && GET_CODE (op0) == XOR
1841 && (CONST_INT_P (XEXP (op0, 1))
1842 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1843 && mode_signbit_p (mode, op1))
1844 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1845 simplify_gen_binary (XOR, mode, op1,
1846 XEXP (op0, 1)));
1848 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1849 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1850 && GET_CODE (op0) == MULT
1851 && GET_CODE (XEXP (op0, 0)) == NEG)
1853 rtx in1, in2;
1855 in1 = XEXP (XEXP (op0, 0), 0);
1856 in2 = XEXP (op0, 1);
1857 return simplify_gen_binary (MINUS, mode, op1,
1858 simplify_gen_binary (MULT, mode,
1859 in1, in2));
1862 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1863 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1864 is 1. */
1865 if (COMPARISON_P (op0)
1866 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1867 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1868 && (reversed = reversed_comparison (op0, mode)))
1869 return
1870 simplify_gen_unary (NEG, mode, reversed, mode);
1872 /* If one of the operands is a PLUS or a MINUS, see if we can
1873 simplify this by the associative law.
1874 Don't use the associative law for floating point.
1875 The inaccuracy makes it nonassociative,
1876 and subtle programs can break if operations are associated. */
1878 if (INTEGRAL_MODE_P (mode)
1879 && (plus_minus_operand_p (op0)
1880 || plus_minus_operand_p (op1))
1881 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1882 return tem;
1884 /* Reassociate floating point addition only when the user
1885 specifies associative math operations. */
1886 if (FLOAT_MODE_P (mode)
1887 && flag_associative_math)
1889 tem = simplify_associative_operation (code, mode, op0, op1);
1890 if (tem)
1891 return tem;
1893 break;
1895 case COMPARE:
1896 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1897 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1898 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1899 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1901 rtx xop00 = XEXP (op0, 0);
1902 rtx xop10 = XEXP (op1, 0);
1904 #ifdef HAVE_cc0
1905 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1906 #else
1907 if (REG_P (xop00) && REG_P (xop10)
1908 && GET_MODE (xop00) == GET_MODE (xop10)
1909 && REGNO (xop00) == REGNO (xop10)
1910 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1911 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1912 #endif
1913 return xop00;
1915 break;
1917 case MINUS:
1918 /* We can't assume x-x is 0 even with non-IEEE floating point,
1919 but since it is zero except in very strange circumstances, we
1920 will treat it as zero with -ffinite-math-only. */
1921 if (rtx_equal_p (trueop0, trueop1)
1922 && ! side_effects_p (op0)
1923 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1924 return CONST0_RTX (mode);
1926 /* Change subtraction from zero into negation. (0 - x) is the
1927 same as -x when x is NaN, infinite, or finite and nonzero.
1928 But if the mode has signed zeros, and does not round towards
1929 -infinity, then 0 - 0 is 0, not -0. */
1930 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1931 return simplify_gen_unary (NEG, mode, op1, mode);
1933 /* (-1 - a) is ~a. */
1934 if (trueop0 == constm1_rtx)
1935 return simplify_gen_unary (NOT, mode, op1, mode);
1937 /* Subtracting 0 has no effect unless the mode has signed zeros
1938 and supports rounding towards -infinity. In such a case,
1939 0 - 0 is -0. */
1940 if (!(HONOR_SIGNED_ZEROS (mode)
1941 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1942 && trueop1 == CONST0_RTX (mode))
1943 return op0;
1945 /* See if this is something like X * C - X or vice versa or
1946 if the multiplication is written as a shift. If so, we can
1947 distribute and make a new multiply, shift, or maybe just
1948 have X (if C is 2 in the example above). But don't make
1949 something more expensive than we had before. */
1951 if (SCALAR_INT_MODE_P (mode))
1953 double_int coeff0, negcoeff1;
1954 rtx lhs = op0, rhs = op1;
1956 coeff0 = double_int_one;
1957 negcoeff1 = double_int_minus_one;
1959 if (GET_CODE (lhs) == NEG)
1961 coeff0 = double_int_minus_one;
1962 lhs = XEXP (lhs, 0);
1964 else if (GET_CODE (lhs) == MULT
1965 && CONST_INT_P (XEXP (lhs, 1)))
1967 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1968 lhs = XEXP (lhs, 0);
1970 else if (GET_CODE (lhs) == ASHIFT
1971 && CONST_INT_P (XEXP (lhs, 1))
1972 && INTVAL (XEXP (lhs, 1)) >= 0
1973 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1975 coeff0 = double_int_setbit (double_int_zero,
1976 INTVAL (XEXP (lhs, 1)));
1977 lhs = XEXP (lhs, 0);
1980 if (GET_CODE (rhs) == NEG)
1982 negcoeff1 = double_int_one;
1983 rhs = XEXP (rhs, 0);
1985 else if (GET_CODE (rhs) == MULT
1986 && CONST_INT_P (XEXP (rhs, 1)))
1988 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
1989 rhs = XEXP (rhs, 0);
1991 else if (GET_CODE (rhs) == ASHIFT
1992 && CONST_INT_P (XEXP (rhs, 1))
1993 && INTVAL (XEXP (rhs, 1)) >= 0
1994 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1996 negcoeff1 = double_int_setbit (double_int_zero,
1997 INTVAL (XEXP (rhs, 1)));
1998 negcoeff1 = double_int_neg (negcoeff1);
1999 rhs = XEXP (rhs, 0);
2002 if (rtx_equal_p (lhs, rhs))
2004 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2005 rtx coeff;
2006 double_int val;
2007 bool speed = optimize_function_for_speed_p (cfun);
2009 val = double_int_add (coeff0, negcoeff1);
2010 coeff = immed_double_int_const (val, mode);
2012 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2013 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2014 ? tem : 0;
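/* For example, (minus (mult X (const_int 3)) X) collapses to
(mult X (const_int 2)), and (minus (ashift X (const_int 2)) X) to
(mult X (const_int 3)), provided the new form is no more expensive
than the original according to rtx_cost.  */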
2018 /* (a - (-b)) -> (a + b). True even for IEEE. */
2019 if (GET_CODE (op1) == NEG)
2020 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2022 /* (-x - c) may be simplified as (-c - x). */
2023 if (GET_CODE (op0) == NEG
2024 && (CONST_INT_P (op1)
2025 || GET_CODE (op1) == CONST_DOUBLE))
2027 tem = simplify_unary_operation (NEG, mode, op1, mode);
2028 if (tem)
2029 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2032 /* Don't let a relocatable value get a negative coeff. */
2033 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2034 return simplify_gen_binary (PLUS, mode,
2035 op0,
2036 neg_const_int (mode, op1));
2038 /* (x - (x & y)) -> (x & ~y) */
2039 if (GET_CODE (op1) == AND)
2041 if (rtx_equal_p (op0, XEXP (op1, 0)))
2043 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2044 GET_MODE (XEXP (op1, 1)));
2045 return simplify_gen_binary (AND, mode, op0, tem);
2047 if (rtx_equal_p (op0, XEXP (op1, 1)))
2049 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2050 GET_MODE (XEXP (op1, 0)));
2051 return simplify_gen_binary (AND, mode, op0, tem);
2055 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2056 by reversing the comparison code if valid. */
2057 if (STORE_FLAG_VALUE == 1
2058 && trueop0 == const1_rtx
2059 && COMPARISON_P (op1)
2060 && (reversed = reversed_comparison (op1, mode)))
2061 return reversed;
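/* For example, (minus (const_int 1) (eq X Y)) becomes (ne X Y)
when the comparison can be reversed.  */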
2063 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2064 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2065 && GET_CODE (op1) == MULT
2066 && GET_CODE (XEXP (op1, 0)) == NEG)
2068 rtx in1, in2;
2070 in1 = XEXP (XEXP (op1, 0), 0);
2071 in2 = XEXP (op1, 1);
2072 return simplify_gen_binary (PLUS, mode,
2073 simplify_gen_binary (MULT, mode,
2074 in1, in2),
2075 op0);
2078 /* Canonicalize (minus (neg A) (mult B C)) to
2079 (minus (mult (neg B) C) A). */
2080 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2081 && GET_CODE (op1) == MULT
2082 && GET_CODE (op0) == NEG)
2084 rtx in1, in2;
2086 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2087 in2 = XEXP (op1, 1);
2088 return simplify_gen_binary (MINUS, mode,
2089 simplify_gen_binary (MULT, mode,
2090 in1, in2),
2091 XEXP (op0, 0));
2094 /* If one of the operands is a PLUS or a MINUS, see if we can
2095 simplify this by the associative law. This will, for example,
2096 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2097 Don't use the associative law for floating point.
2098 The inaccuracy makes it nonassociative,
2099 and subtle programs can break if operations are associated. */
2101 if (INTEGRAL_MODE_P (mode)
2102 && (plus_minus_operand_p (op0)
2103 || plus_minus_operand_p (op1))
2104 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2105 return tem;
2106 break;
2108 case MULT:
2109 if (trueop1 == constm1_rtx)
2110 return simplify_gen_unary (NEG, mode, op0, mode);
2112 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2113 x is NaN, since x * 0 is then also NaN. Nor is it valid
2114 when the mode has signed zeros, since multiplying a negative
2115 number by 0 will give -0, not 0. */
2116 if (!HONOR_NANS (mode)
2117 && !HONOR_SIGNED_ZEROS (mode)
2118 && trueop1 == CONST0_RTX (mode)
2119 && ! side_effects_p (op0))
2120 return op1;
2122 /* In IEEE floating point, x*1 is not equivalent to x for
2123 signalling NaNs. */
2124 if (!HONOR_SNANS (mode)
2125 && trueop1 == CONST1_RTX (mode))
2126 return op0;
2128 /* Convert multiply by constant power of two into shift unless
2129 we are still generating RTL. This test is a kludge. */
2130 if (CONST_INT_P (trueop1)
2131 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2132 /* If the mode is larger than the host word size, and the
2133 uppermost bit is set, then this isn't a power of two due
2134 to implicit sign extension. */
2135 && (width <= HOST_BITS_PER_WIDE_INT
2136 || val != HOST_BITS_PER_WIDE_INT - 1))
2137 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2139 /* Likewise for multipliers wider than a word. */
2140 if (GET_CODE (trueop1) == CONST_DOUBLE
2141 && (GET_MODE (trueop1) == VOIDmode
2142 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2143 && GET_MODE (op0) == mode
2144 && CONST_DOUBLE_LOW (trueop1) == 0
2145 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2146 return simplify_gen_binary (ASHIFT, mode, op0,
2147 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2149 /* x*2 is x+x and x*(-1) is -x */
2150 if (GET_CODE (trueop1) == CONST_DOUBLE
2151 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2152 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2153 && GET_MODE (op0) == mode)
2155 REAL_VALUE_TYPE d;
2156 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2158 if (REAL_VALUES_EQUAL (d, dconst2))
2159 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2161 if (!HONOR_SNANS (mode)
2162 && REAL_VALUES_EQUAL (d, dconstm1))
2163 return simplify_gen_unary (NEG, mode, op0, mode);
2166 /* Optimize -x * -x as x * x. */
2167 if (FLOAT_MODE_P (mode)
2168 && GET_CODE (op0) == NEG
2169 && GET_CODE (op1) == NEG
2170 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2171 && !side_effects_p (XEXP (op0, 0)))
2172 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2174 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2175 if (SCALAR_FLOAT_MODE_P (mode)
2176 && GET_CODE (op0) == ABS
2177 && GET_CODE (op1) == ABS
2178 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2179 && !side_effects_p (XEXP (op0, 0)))
2180 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2182 /* Reassociate multiplication, but for floating point MULTs
2183 only when the user specifies unsafe math optimizations. */
2184 if (! FLOAT_MODE_P (mode)
2185 || flag_unsafe_math_optimizations)
2187 tem = simplify_associative_operation (code, mode, op0, op1);
2188 if (tem)
2189 return tem;
2191 break;
2193 case IOR:
2194 if (trueop1 == const0_rtx)
2195 return op0;
2196 if (CONST_INT_P (trueop1)
2197 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2198 == GET_MODE_MASK (mode)))
2199 return op1;
2200 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2201 return op0;
2202 /* A | (~A) -> -1 */
2203 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2204 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2205 && ! side_effects_p (op0)
2206 && SCALAR_INT_MODE_P (mode))
2207 return constm1_rtx;
2209 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2210 if (CONST_INT_P (op1)
2211 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2212 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2213 return op1;
2215 /* Canonicalize (X & C1) | C2. */
2216 if (GET_CODE (op0) == AND
2217 && CONST_INT_P (trueop1)
2218 && CONST_INT_P (XEXP (op0, 1)))
2220 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2221 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2222 HOST_WIDE_INT c2 = INTVAL (trueop1);
2224 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2225 if ((c1 & c2) == c1
2226 && !side_effects_p (XEXP (op0, 0)))
2227 return trueop1;
2229 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2230 if (((c1|c2) & mask) == mask)
2231 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2233 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2234 if (((c1 & ~c2) & mask) != (c1 & mask))
2236 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2237 gen_int_mode (c1 & ~c2, mode));
2238 return simplify_gen_binary (IOR, mode, tem, op1);
2242 /* Convert (A & B) | A to A. */
2243 if (GET_CODE (op0) == AND
2244 && (rtx_equal_p (XEXP (op0, 0), op1)
2245 || rtx_equal_p (XEXP (op0, 1), op1))
2246 && ! side_effects_p (XEXP (op0, 0))
2247 && ! side_effects_p (XEXP (op0, 1)))
2248 return op1;
2250 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2251 mode size to (rotate A CX). */
2253 if (GET_CODE (op1) == ASHIFT
2254 || GET_CODE (op1) == SUBREG)
2256 opleft = op1;
2257 opright = op0;
2259 else
2261 opright = op1;
2262 opleft = op0;
2265 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2266 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2267 && CONST_INT_P (XEXP (opleft, 1))
2268 && CONST_INT_P (XEXP (opright, 1))
2269 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2270 == GET_MODE_BITSIZE (mode)))
2271 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
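/* For example, in SImode (ior (ashift A (const_int 24))
(lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)),
since 24 + 8 equals the mode bitsize of 32.  */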
2273 /* Same, but for ashift that has been "simplified" to a wider mode
2274 by simplify_shift_const. */
2276 if (GET_CODE (opleft) == SUBREG
2277 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2278 && GET_CODE (opright) == LSHIFTRT
2279 && GET_CODE (XEXP (opright, 0)) == SUBREG
2280 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2281 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2282 && (GET_MODE_SIZE (GET_MODE (opleft))
2283 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2284 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2285 SUBREG_REG (XEXP (opright, 0)))
2286 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2287 && CONST_INT_P (XEXP (opright, 1))
2288 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2289 == GET_MODE_BITSIZE (mode)))
2290 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2291 XEXP (SUBREG_REG (opleft), 1));
2293 /* If we have (ior (and X C1) C2), simplify this by making
2294 C1 as small as possible if C1 actually changes. */
2295 if (CONST_INT_P (op1)
2296 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2297 || INTVAL (op1) > 0)
2298 && GET_CODE (op0) == AND
2299 && CONST_INT_P (XEXP (op0, 1))
2300 && CONST_INT_P (op1)
2301 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2302 return simplify_gen_binary (IOR, mode,
2303 simplify_gen_binary
2304 (AND, mode, XEXP (op0, 0),
2305 GEN_INT (INTVAL (XEXP (op0, 1))
2306 & ~INTVAL (op1))),
2307 op1);
2309 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2310 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2311 the PLUS does not affect any of the bits in OP1: then we can do
2312 the IOR as a PLUS and we can associate. This is valid if OP1
2313 can be safely shifted left C bits. */
2314 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2315 && GET_CODE (XEXP (op0, 0)) == PLUS
2316 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2317 && CONST_INT_P (XEXP (op0, 1))
2318 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2320 int count = INTVAL (XEXP (op0, 1));
2321 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2323 if (mask >> count == INTVAL (trueop1)
2324 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2325 return simplify_gen_binary (ASHIFTRT, mode,
2326 plus_constant (XEXP (op0, 0), mask),
2327 XEXP (op0, 1));
2330 tem = simplify_associative_operation (code, mode, op0, op1);
2331 if (tem)
2332 return tem;
2333 break;
2335 case XOR:
2336 if (trueop1 == const0_rtx)
2337 return op0;
2338 if (CONST_INT_P (trueop1)
2339 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2340 == GET_MODE_MASK (mode)))
2341 return simplify_gen_unary (NOT, mode, op0, mode);
2342 if (rtx_equal_p (trueop0, trueop1)
2343 && ! side_effects_p (op0)
2344 && GET_MODE_CLASS (mode) != MODE_CC)
2345 return CONST0_RTX (mode);
2347 /* Canonicalize XOR of the most significant bit to PLUS. */
2348 if ((CONST_INT_P (op1)
2349 || GET_CODE (op1) == CONST_DOUBLE)
2350 && mode_signbit_p (mode, op1))
2351 return simplify_gen_binary (PLUS, mode, op0, op1);
2352 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2353 if ((CONST_INT_P (op1)
2354 || GET_CODE (op1) == CONST_DOUBLE)
2355 && GET_CODE (op0) == PLUS
2356 && (CONST_INT_P (XEXP (op0, 1))
2357 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2358 && mode_signbit_p (mode, XEXP (op0, 1)))
2359 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2360 simplify_gen_binary (XOR, mode, op1,
2361 XEXP (op0, 1)));
2363 /* If we are XORing two things that have no bits in common,
2364 convert them into an IOR. This helps to detect rotation encoded
2365 using those methods and possibly other simplifications. */
2367 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2368 && (nonzero_bits (op0, mode)
2369 & nonzero_bits (op1, mode)) == 0)
2370 return (simplify_gen_binary (IOR, mode, op0, op1));
2372 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2373 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2374 (NOT y). */
2376 int num_negated = 0;
2378 if (GET_CODE (op0) == NOT)
2379 num_negated++, op0 = XEXP (op0, 0);
2380 if (GET_CODE (op1) == NOT)
2381 num_negated++, op1 = XEXP (op1, 0);
2383 if (num_negated == 2)
2384 return simplify_gen_binary (XOR, mode, op0, op1);
2385 else if (num_negated == 1)
2386 return simplify_gen_unary (NOT, mode,
2387 simplify_gen_binary (XOR, mode, op0, op1),
2388 mode);
2391 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2392 correspond to a machine insn or result in further simplifications
2393 if B is a constant. */
2395 if (GET_CODE (op0) == AND
2396 && rtx_equal_p (XEXP (op0, 1), op1)
2397 && ! side_effects_p (op1))
2398 return simplify_gen_binary (AND, mode,
2399 simplify_gen_unary (NOT, mode,
2400 XEXP (op0, 0), mode),
2401 op1);
2403 else if (GET_CODE (op0) == AND
2404 && rtx_equal_p (XEXP (op0, 0), op1)
2405 && ! side_effects_p (op1))
2406 return simplify_gen_binary (AND, mode,
2407 simplify_gen_unary (NOT, mode,
2408 XEXP (op0, 1), mode),
2409 op1);
2411 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2412 comparison if STORE_FLAG_VALUE is 1. */
2413 if (STORE_FLAG_VALUE == 1
2414 && trueop1 == const1_rtx
2415 && COMPARISON_P (op0)
2416 && (reversed = reversed_comparison (op0, mode)))
2417 return reversed;
2419 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2420 is (lt foo (const_int 0)), so we can perform the above
2421 simplification if STORE_FLAG_VALUE is 1. */
2423 if (STORE_FLAG_VALUE == 1
2424 && trueop1 == const1_rtx
2425 && GET_CODE (op0) == LSHIFTRT
2426 && CONST_INT_P (XEXP (op0, 1))
2427 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2428 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
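/* For example, in SImode (xor (lshiftrt X (const_int 31))
(const_int 1)) becomes (ge X (const_int 0)).  */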
2430 /* (xor (comparison foo bar) (const_int sign-bit))
2431 when STORE_FLAG_VALUE is the sign bit. */
2432 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2433 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2434 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2435 && trueop1 == const_true_rtx
2436 && COMPARISON_P (op0)
2437 && (reversed = reversed_comparison (op0, mode)))
2438 return reversed;
2440 tem = simplify_associative_operation (code, mode, op0, op1);
2441 if (tem)
2442 return tem;
2443 break;
2445 case AND:
2446 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2447 return trueop1;
2448 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2450 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2451 HOST_WIDE_INT nzop1;
2452 if (CONST_INT_P (trueop1))
2454 HOST_WIDE_INT val1 = INTVAL (trueop1);
2455 /* If we are turning off bits already known off in OP0, we need
2456 not do an AND. */
2457 if ((nzop0 & ~val1) == 0)
2458 return op0;
2460 nzop1 = nonzero_bits (trueop1, mode);
2461 /* If we are clearing all the nonzero bits, the result is zero. */
2462 if ((nzop1 & nzop0) == 0
2463 && !side_effects_p (op0) && !side_effects_p (op1))
2464 return CONST0_RTX (mode);
2466 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2467 && GET_MODE_CLASS (mode) != MODE_CC)
2468 return op0;
2469 /* A & (~A) -> 0 */
2470 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2471 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2472 && ! side_effects_p (op0)
2473 && GET_MODE_CLASS (mode) != MODE_CC)
2474 return CONST0_RTX (mode);
2476 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2477 there are no nonzero bits of C outside of X's mode. */
2478 if ((GET_CODE (op0) == SIGN_EXTEND
2479 || GET_CODE (op0) == ZERO_EXTEND)
2480 && CONST_INT_P (trueop1)
2481 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2482 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2483 & INTVAL (trueop1)) == 0)
2485 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2486 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2487 gen_int_mode (INTVAL (trueop1),
2488 imode));
2489 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2492 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2493 we might be able to further simplify the AND with X and potentially
2494 remove the truncation altogether. */
2495 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2497 rtx x = XEXP (op0, 0);
2498 enum machine_mode xmode = GET_MODE (x);
2499 tem = simplify_gen_binary (AND, xmode, x,
2500 gen_int_mode (INTVAL (trueop1), xmode));
2501 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2504 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2505 if (GET_CODE (op0) == IOR
2506 && CONST_INT_P (trueop1)
2507 && CONST_INT_P (XEXP (op0, 1)))
2509 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2510 return simplify_gen_binary (IOR, mode,
2511 simplify_gen_binary (AND, mode,
2512 XEXP (op0, 0), op1),
2513 gen_int_mode (tmp, mode));
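/* For example, (and (ior X (const_int 12)) (const_int 10)) is
rewritten as (ior (and X (const_int 10)) (const_int 8)).  */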
2516 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2517 insn (and may simplify more). */
2518 if (GET_CODE (op0) == XOR
2519 && rtx_equal_p (XEXP (op0, 0), op1)
2520 && ! side_effects_p (op1))
2521 return simplify_gen_binary (AND, mode,
2522 simplify_gen_unary (NOT, mode,
2523 XEXP (op0, 1), mode),
2524 op1);
2526 if (GET_CODE (op0) == XOR
2527 && rtx_equal_p (XEXP (op0, 1), op1)
2528 && ! side_effects_p (op1))
2529 return simplify_gen_binary (AND, mode,
2530 simplify_gen_unary (NOT, mode,
2531 XEXP (op0, 0), mode),
2532 op1);
2534 /* Similarly for (~(A ^ B)) & A. */
2535 if (GET_CODE (op0) == NOT
2536 && GET_CODE (XEXP (op0, 0)) == XOR
2537 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2538 && ! side_effects_p (op1))
2539 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2541 if (GET_CODE (op0) == NOT
2542 && GET_CODE (XEXP (op0, 0)) == XOR
2543 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2544 && ! side_effects_p (op1))
2545 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2547 /* Convert (A | B) & A to A. */
2548 if (GET_CODE (op0) == IOR
2549 && (rtx_equal_p (XEXP (op0, 0), op1)
2550 || rtx_equal_p (XEXP (op0, 1), op1))
2551 && ! side_effects_p (XEXP (op0, 0))
2552 && ! side_effects_p (XEXP (op0, 1)))
2553 return op1;
2555 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2556 ((A & N) + B) & M -> (A + B) & M
2557 Similarly if (N & M) == 0,
2558 ((A | N) + B) & M -> (A + B) & M
2559 and for - instead of + and/or ^ instead of |.
2560 Also, if (N & M) == 0, then
2561 (A +- N) & M -> A & M. */
2562 if (CONST_INT_P (trueop1)
2563 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2564 && ~INTVAL (trueop1)
2565 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2566 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2568 rtx pmop[2];
2569 int which;
2571 pmop[0] = XEXP (op0, 0);
2572 pmop[1] = XEXP (op0, 1);
2574 if (CONST_INT_P (pmop[1])
2575 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2576 return simplify_gen_binary (AND, mode, pmop[0], op1);
2578 for (which = 0; which < 2; which++)
2580 tem = pmop[which];
2581 switch (GET_CODE (tem))
2583 case AND:
2584 if (CONST_INT_P (XEXP (tem, 1))
2585 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2586 == INTVAL (trueop1))
2587 pmop[which] = XEXP (tem, 0);
2588 break;
2589 case IOR:
2590 case XOR:
2591 if (CONST_INT_P (XEXP (tem, 1))
2592 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2593 pmop[which] = XEXP (tem, 0);
2594 break;
2595 default:
2596 break;
2600 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2602 tem = simplify_gen_binary (GET_CODE (op0), mode,
2603 pmop[0], pmop[1]);
2604 return simplify_gen_binary (code, mode, tem, op1);
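/* For example, with M == 15 this turns
(and (plus (and A (const_int 255)) B) (const_int 15)) into
(and (plus A B) (const_int 15)), since 255 & 15 == 15.  */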
2608 /* (and X (ior (not X) Y)) -> (and X Y) */
2609 if (GET_CODE (op1) == IOR
2610 && GET_CODE (XEXP (op1, 0)) == NOT
2611 && op0 == XEXP (XEXP (op1, 0), 0))
2612 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2614 /* (and (ior (not X) Y) X) -> (and X Y) */
2615 if (GET_CODE (op0) == IOR
2616 && GET_CODE (XEXP (op0, 0)) == NOT
2617 && op1 == XEXP (XEXP (op0, 0), 0))
2618 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2620 tem = simplify_associative_operation (code, mode, op0, op1);
2621 if (tem)
2622 return tem;
2623 break;
2625 case UDIV:
2626 /* 0/x is 0 (or x&0 if x has side-effects). */
2627 if (trueop0 == CONST0_RTX (mode))
2629 if (side_effects_p (op1))
2630 return simplify_gen_binary (AND, mode, op1, trueop0);
2631 return trueop0;
2633 /* x/1 is x. */
2634 if (trueop1 == CONST1_RTX (mode))
2635 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2636 /* Convert divide by power of two into shift. */
2637 if (CONST_INT_P (trueop1)
2638 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2639 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2640 break;
2642 case DIV:
2643 /* Handle floating point and integers separately. */
2644 if (SCALAR_FLOAT_MODE_P (mode))
2646 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2647 safe for modes with NaNs, since 0.0 / 0.0 will then be
2648 NaN rather than 0.0. Nor is it safe for modes with signed
2649 zeros, since dividing 0 by a negative number gives -0.0. */
2650 if (trueop0 == CONST0_RTX (mode)
2651 && !HONOR_NANS (mode)
2652 && !HONOR_SIGNED_ZEROS (mode)
2653 && ! side_effects_p (op1))
2654 return op0;
2655 /* x/1.0 is x. */
2656 if (trueop1 == CONST1_RTX (mode)
2657 && !HONOR_SNANS (mode))
2658 return op0;
2660 if (GET_CODE (trueop1) == CONST_DOUBLE
2661 && trueop1 != CONST0_RTX (mode))
2663 REAL_VALUE_TYPE d;
2664 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2666 /* x/-1.0 is -x. */
2667 if (REAL_VALUES_EQUAL (d, dconstm1)
2668 && !HONOR_SNANS (mode))
2669 return simplify_gen_unary (NEG, mode, op0, mode);
2671 /* Change FP division by a constant into multiplication.
2672 Only do this with -freciprocal-math. */
2673 if (flag_reciprocal_math
2674 && !REAL_VALUES_EQUAL (d, dconst0))
2676 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2677 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2678 return simplify_gen_binary (MULT, mode, op0, tem);
2682 else
2684 /* 0/x is 0 (or x&0 if x has side-effects). */
2685 if (trueop0 == CONST0_RTX (mode))
2687 if (side_effects_p (op1))
2688 return simplify_gen_binary (AND, mode, op1, trueop0);
2689 return trueop0;
2691 /* x/1 is x. */
2692 if (trueop1 == CONST1_RTX (mode))
2693 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2694 /* x/-1 is -x. */
2695 if (trueop1 == constm1_rtx)
2697 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2698 return simplify_gen_unary (NEG, mode, x, mode);
2701 break;
2703 case UMOD:
2704 /* 0%x is 0 (or x&0 if x has side-effects). */
2705 if (trueop0 == CONST0_RTX (mode))
2707 if (side_effects_p (op1))
2708 return simplify_gen_binary (AND, mode, op1, trueop0);
2709 return trueop0;
2711 /* x%1 is 0 (or x&0 if x has side-effects). */
2712 if (trueop1 == CONST1_RTX (mode))
2714 if (side_effects_p (op0))
2715 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2716 return CONST0_RTX (mode);
2718 /* Implement modulus by power of two as AND. */
2719 if (CONST_INT_P (trueop1)
2720 && exact_log2 (INTVAL (trueop1)) > 0)
2721 return simplify_gen_binary (AND, mode, op0,
2722 GEN_INT (INTVAL (op1) - 1));
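/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */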
2723 break;
2725 case MOD:
2726 /* 0%x is 0 (or x&0 if x has side-effects). */
2727 if (trueop0 == CONST0_RTX (mode))
2729 if (side_effects_p (op1))
2730 return simplify_gen_binary (AND, mode, op1, trueop0);
2731 return trueop0;
2733 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2734 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2736 if (side_effects_p (op0))
2737 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2738 return CONST0_RTX (mode);
2740 break;
2742 case ROTATERT:
2743 case ROTATE:
2744 case ASHIFTRT:
2745 if (trueop1 == CONST0_RTX (mode))
2746 return op0;
2747 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2748 return op0;
2749 /* Rotating ~0 always results in ~0. */
2750 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2751 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2752 && ! side_effects_p (op1))
2753 return op0;
2754 canonicalize_shift:
2755 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2757 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2758 if (val != INTVAL (op1))
2759 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2761 break;
2763 case ASHIFT:
2764 case SS_ASHIFT:
2765 case US_ASHIFT:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2769 return op0;
2770 goto canonicalize_shift;
2772 case LSHIFTRT:
2773 if (trueop1 == CONST0_RTX (mode))
2774 return op0;
2775 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2776 return op0;
2777 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2778 if (GET_CODE (op0) == CLZ
2779 && CONST_INT_P (trueop1)
2780 && STORE_FLAG_VALUE == 1
2781 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2783 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2784 unsigned HOST_WIDE_INT zero_val = 0;
2786 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2787 && zero_val == GET_MODE_BITSIZE (imode)
2788 && INTVAL (trueop1) == exact_log2 (zero_val))
2789 return simplify_gen_relational (EQ, mode, imode,
2790 XEXP (op0, 0), const0_rtx);
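/* On a target where CLZ of zero is defined as the mode bitsize,
e.g. 32 for SImode, this turns (lshiftrt (clz:SI X) (const_int 5))
into (eq X (const_int 0)), since only a zero input makes the CLZ
result large enough for bit 5 to be set.  */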
2792 goto canonicalize_shift;
2794 case SMIN:
2795 if (width <= HOST_BITS_PER_WIDE_INT
2796 && CONST_INT_P (trueop1)
2797 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2798 && ! side_effects_p (op0))
2799 return op1;
2800 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2801 return op0;
2802 tem = simplify_associative_operation (code, mode, op0, op1);
2803 if (tem)
2804 return tem;
2805 break;
2807 case SMAX:
2808 if (width <= HOST_BITS_PER_WIDE_INT
2809 && CONST_INT_P (trueop1)
2810 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2811 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2812 && ! side_effects_p (op0))
2813 return op1;
2814 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2815 return op0;
2816 tem = simplify_associative_operation (code, mode, op0, op1);
2817 if (tem)
2818 return tem;
2819 break;
2821 case UMIN:
2822 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2823 return op1;
2824 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2825 return op0;
2826 tem = simplify_associative_operation (code, mode, op0, op1);
2827 if (tem)
2828 return tem;
2829 break;
2831 case UMAX:
2832 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2833 return op1;
2834 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2835 return op0;
2836 tem = simplify_associative_operation (code, mode, op0, op1);
2837 if (tem)
2838 return tem;
2839 break;
2841 case SS_PLUS:
2842 case US_PLUS:
2843 case SS_MINUS:
2844 case US_MINUS:
2845 case SS_MULT:
2846 case US_MULT:
2847 case SS_DIV:
2848 case US_DIV:
2849 /* ??? There are simplifications that can be done. */
2850 return 0;
2852 case VEC_SELECT:
2853 if (!VECTOR_MODE_P (mode))
2855 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2856 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2857 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2858 gcc_assert (XVECLEN (trueop1, 0) == 1);
2859 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2861 if (GET_CODE (trueop0) == CONST_VECTOR)
2862 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2863 (trueop1, 0, 0)));
2865 /* Extract a scalar element from a nested VEC_SELECT expression
2866 (with an optional nested VEC_CONCAT expression). Some targets
2867 (i386) extract a scalar element from a vector using a chain of
2868 nested VEC_SELECT expressions. When the input operand is a
2869 memory operand, this operation can be simplified to a simple
2870 scalar load from an offset memory address. */
2871 if (GET_CODE (trueop0) == VEC_SELECT)
2873 rtx op0 = XEXP (trueop0, 0);
2874 rtx op1 = XEXP (trueop0, 1);
2876 enum machine_mode opmode = GET_MODE (op0);
2877 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2878 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2880 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2881 int elem;
2883 rtvec vec;
2884 rtx tmp_op, tmp;
2886 gcc_assert (GET_CODE (op1) == PARALLEL);
2887 gcc_assert (i < n_elts);
2889 /* Select the element pointed to by the nested selector. */
2890 elem = INTVAL (XVECEXP (op1, 0, i));
2892 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2893 if (GET_CODE (op0) == VEC_CONCAT)
2895 rtx op00 = XEXP (op0, 0);
2896 rtx op01 = XEXP (op0, 1);
2898 enum machine_mode mode00, mode01;
2899 int n_elts00, n_elts01;
2901 mode00 = GET_MODE (op00);
2902 mode01 = GET_MODE (op01);
2904 /* Find out number of elements of each operand. */
2905 if (VECTOR_MODE_P (mode00))
2907 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2908 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2910 else
2911 n_elts00 = 1;
2913 if (VECTOR_MODE_P (mode01))
2915 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2916 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2918 else
2919 n_elts01 = 1;
2921 gcc_assert (n_elts == n_elts00 + n_elts01);
2923 /* Select correct operand of VEC_CONCAT
2924 and adjust selector. */
2925 if (elem < n_elts01)
2926 tmp_op = op00;
2927 else
2929 tmp_op = op01;
2930 elem -= n_elts00;
2933 else
2934 tmp_op = op0;
2936 vec = rtvec_alloc (1);
2937 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2939 tmp = gen_rtx_fmt_ee (code, mode,
2940 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2941 return tmp;
2943 if (GET_CODE (trueop0) == VEC_DUPLICATE
2944 && GET_MODE (XEXP (trueop0, 0)) == mode)
2945 return XEXP (trueop0, 0);
2947 else
2949 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2950 gcc_assert (GET_MODE_INNER (mode)
2951 == GET_MODE_INNER (GET_MODE (trueop0)));
2952 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2954 if (GET_CODE (trueop0) == CONST_VECTOR)
2956 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2957 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2958 rtvec v = rtvec_alloc (n_elts);
2959 unsigned int i;
2961 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2962 for (i = 0; i < n_elts; i++)
2964 rtx x = XVECEXP (trueop1, 0, i);
2966 gcc_assert (CONST_INT_P (x));
2967 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2968 INTVAL (x));
2971 return gen_rtx_CONST_VECTOR (mode, v);
2975 if (XVECLEN (trueop1, 0) == 1
2976 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2977 && GET_CODE (trueop0) == VEC_CONCAT)
2979 rtx vec = trueop0;
2980 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2982 /* Try to find the element in the VEC_CONCAT. */
2983 while (GET_MODE (vec) != mode
2984 && GET_CODE (vec) == VEC_CONCAT)
2986 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2987 if (offset < vec_size)
2988 vec = XEXP (vec, 0);
2989 else
2991 offset -= vec_size;
2992 vec = XEXP (vec, 1);
2994 vec = avoid_constant_pool_reference (vec);
2997 if (GET_MODE (vec) == mode)
2998 return vec;
3001 return 0;
3002 case VEC_CONCAT:
3004 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3005 ? GET_MODE (trueop0)
3006 : GET_MODE_INNER (mode));
3007 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3008 ? GET_MODE (trueop1)
3009 : GET_MODE_INNER (mode));
3011 gcc_assert (VECTOR_MODE_P (mode));
3012 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3013 == GET_MODE_SIZE (mode));
3015 if (VECTOR_MODE_P (op0_mode))
3016 gcc_assert (GET_MODE_INNER (mode)
3017 == GET_MODE_INNER (op0_mode));
3018 else
3019 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3021 if (VECTOR_MODE_P (op1_mode))
3022 gcc_assert (GET_MODE_INNER (mode)
3023 == GET_MODE_INNER (op1_mode));
3024 else
3025 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3027 if ((GET_CODE (trueop0) == CONST_VECTOR
3028 || CONST_INT_P (trueop0)
3029 || GET_CODE (trueop0) == CONST_DOUBLE)
3030 && (GET_CODE (trueop1) == CONST_VECTOR
3031 || CONST_INT_P (trueop1)
3032 || GET_CODE (trueop1) == CONST_DOUBLE))
3034 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3035 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3036 rtvec v = rtvec_alloc (n_elts);
3037 unsigned int i;
3038 unsigned in_n_elts = 1;
3040 if (VECTOR_MODE_P (op0_mode))
3041 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3042 for (i = 0; i < n_elts; i++)
3044 if (i < in_n_elts)
3046 if (!VECTOR_MODE_P (op0_mode))
3047 RTVEC_ELT (v, i) = trueop0;
3048 else
3049 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3051 else
3053 if (!VECTOR_MODE_P (op1_mode))
3054 RTVEC_ELT (v, i) = trueop1;
3055 else
3056 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3057 i - in_n_elts);
3061 return gen_rtx_CONST_VECTOR (mode, v);
3064 return 0;
3066 default:
3067 gcc_unreachable ();
3070 return 0;
3073 rtx
3074 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3075 rtx op0, rtx op1)
3077 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3078 HOST_WIDE_INT val;
3079 unsigned int width = GET_MODE_BITSIZE (mode);
3081 if (VECTOR_MODE_P (mode)
3082 && code != VEC_CONCAT
3083 && GET_CODE (op0) == CONST_VECTOR
3084 && GET_CODE (op1) == CONST_VECTOR)
3086 unsigned n_elts = GET_MODE_NUNITS (mode);
3087 enum machine_mode op0mode = GET_MODE (op0);
3088 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3089 enum machine_mode op1mode = GET_MODE (op1);
3090 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3091 rtvec v = rtvec_alloc (n_elts);
3092 unsigned int i;
3094 gcc_assert (op0_n_elts == n_elts);
3095 gcc_assert (op1_n_elts == n_elts);
3096 for (i = 0; i < n_elts; i++)
3098 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3099 CONST_VECTOR_ELT (op0, i),
3100 CONST_VECTOR_ELT (op1, i));
3101 if (!x)
3102 return 0;
3103 RTVEC_ELT (v, i) = x;
3106 return gen_rtx_CONST_VECTOR (mode, v);
3109 if (VECTOR_MODE_P (mode)
3110 && code == VEC_CONCAT
3111 && (CONST_INT_P (op0)
3112 || GET_CODE (op0) == CONST_DOUBLE
3113 || GET_CODE (op0) == CONST_FIXED)
3114 && (CONST_INT_P (op1)
3115 || GET_CODE (op1) == CONST_DOUBLE
3116 || GET_CODE (op1) == CONST_FIXED))
3118 unsigned n_elts = GET_MODE_NUNITS (mode);
3119 rtvec v = rtvec_alloc (n_elts);
3121 gcc_assert (n_elts >= 2);
3122 if (n_elts == 2)
3124 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3125 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3127 RTVEC_ELT (v, 0) = op0;
3128 RTVEC_ELT (v, 1) = op1;
3130 else
3132 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3133 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3134 unsigned i;
3136 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3137 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3138 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3140 for (i = 0; i < op0_n_elts; ++i)
3141 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3142 for (i = 0; i < op1_n_elts; ++i)
3143 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3146 return gen_rtx_CONST_VECTOR (mode, v);
3149 if (SCALAR_FLOAT_MODE_P (mode)
3150 && GET_CODE (op0) == CONST_DOUBLE
3151 && GET_CODE (op1) == CONST_DOUBLE
3152 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3154 if (code == AND
3155 || code == IOR
3156 || code == XOR)
3158 long tmp0[4];
3159 long tmp1[4];
3160 REAL_VALUE_TYPE r;
3161 int i;
3163 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3164 GET_MODE (op0));
3165 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3166 GET_MODE (op1));
3167 for (i = 0; i < 4; i++)
3169 switch (code)
3171 case AND:
3172 tmp0[i] &= tmp1[i];
3173 break;
3174 case IOR:
3175 tmp0[i] |= tmp1[i];
3176 break;
3177 case XOR:
3178 tmp0[i] ^= tmp1[i];
3179 break;
3180 default:
3181 gcc_unreachable ();
3184 real_from_target (&r, tmp0, mode);
3185 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3187 else
3189 REAL_VALUE_TYPE f0, f1, value, result;
3190 bool inexact;
3192 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3193 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3194 real_convert (&f0, mode, &f0);
3195 real_convert (&f1, mode, &f1);
3197 if (HONOR_SNANS (mode)
3198 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3199 return 0;
3201 if (code == DIV
3202 && REAL_VALUES_EQUAL (f1, dconst0)
3203 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3204 return 0;
3206 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3207 && flag_trapping_math
3208 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3210 int s0 = REAL_VALUE_NEGATIVE (f0);
3211 int s1 = REAL_VALUE_NEGATIVE (f1);
3213 switch (code)
3215 case PLUS:
3216 /* Inf + -Inf = NaN plus exception. */
3217 if (s0 != s1)
3218 return 0;
3219 break;
3220 case MINUS:
3221 /* Inf - Inf = NaN plus exception. */
3222 if (s0 == s1)
3223 return 0;
3224 break;
3225 case DIV:
3226 /* Inf / Inf = NaN plus exception. */
3227 return 0;
3228 default:
3229 break;
3233 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3234 && flag_trapping_math
3235 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3236 || (REAL_VALUE_ISINF (f1)
3237 && REAL_VALUES_EQUAL (f0, dconst0))))
3238 /* Inf * 0 = NaN plus exception. */
3239 return 0;
3241 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3242 &f0, &f1);
3243 real_convert (&result, mode, &value);
3245 /* Don't constant fold this floating point operation if
3246 the result has overflowed and flag_trapping_math. */
3248 if (flag_trapping_math
3249 && MODE_HAS_INFINITIES (mode)
3250 && REAL_VALUE_ISINF (result)
3251 && !REAL_VALUE_ISINF (f0)
3252 && !REAL_VALUE_ISINF (f1))
3253 /* Overflow plus exception. */
3254 return 0;
3256 /* Don't constant fold this floating point operation if the
3257 result may depend upon the run-time rounding mode and
3258 flag_rounding_math is set, or if GCC's software emulation
3259 is unable to accurately represent the result. */
3261 if ((flag_rounding_math
3262 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3263 && (inexact || !real_identical (&result, &value)))
3264 return NULL_RTX;
3266 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3270 /* We can fold some multi-word operations. */
3271 if (GET_MODE_CLASS (mode) == MODE_INT
3272 && width == HOST_BITS_PER_WIDE_INT * 2
3273 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3274 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3276 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3277 HOST_WIDE_INT h1, h2, hv, ht;
3279 if (GET_CODE (op0) == CONST_DOUBLE)
3280 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3281 else
3282 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3284 if (GET_CODE (op1) == CONST_DOUBLE)
3285 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3286 else
3287 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3289 switch (code)
3291 case MINUS:
3292 /* A - B == A + (-B). */
3293 neg_double (l2, h2, &lv, &hv);
3294 l2 = lv, h2 = hv;
3296 /* Fall through.... */
3298 case PLUS:
3299 add_double (l1, h1, l2, h2, &lv, &hv);
3300 break;
3302 case MULT:
3303 mul_double (l1, h1, l2, h2, &lv, &hv);
3304 break;
3306 case DIV:
3307 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3308 &lv, &hv, &lt, &ht))
3309 return 0;
3310 break;
3312 case MOD:
3313 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3314 &lt, &ht, &lv, &hv))
3315 return 0;
3316 break;
3318 case UDIV:
3319 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3320 &lv, &hv, &lt, &ht))
3321 return 0;
3322 break;
3324 case UMOD:
3325 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3326 &lt, &ht, &lv, &hv))
3327 return 0;
3328 break;
3330 case AND:
3331 lv = l1 & l2, hv = h1 & h2;
3332 break;
3334 case IOR:
3335 lv = l1 | l2, hv = h1 | h2;
3336 break;
3338 case XOR:
3339 lv = l1 ^ l2, hv = h1 ^ h2;
3340 break;
3342 case SMIN:
3343 if (h1 < h2
3344 || (h1 == h2
3345 && ((unsigned HOST_WIDE_INT) l1
3346 < (unsigned HOST_WIDE_INT) l2)))
3347 lv = l1, hv = h1;
3348 else
3349 lv = l2, hv = h2;
3350 break;
3352 case SMAX:
3353 if (h1 > h2
3354 || (h1 == h2
3355 && ((unsigned HOST_WIDE_INT) l1
3356 > (unsigned HOST_WIDE_INT) l2)))
3357 lv = l1, hv = h1;
3358 else
3359 lv = l2, hv = h2;
3360 break;
3362 case UMIN:
3363 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3364 || (h1 == h2
3365 && ((unsigned HOST_WIDE_INT) l1
3366 < (unsigned HOST_WIDE_INT) l2)))
3367 lv = l1, hv = h1;
3368 else
3369 lv = l2, hv = h2;
3370 break;
3372 case UMAX:
3373 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3374 || (h1 == h2
3375 && ((unsigned HOST_WIDE_INT) l1
3376 > (unsigned HOST_WIDE_INT) l2)))
3377 lv = l1, hv = h1;
3378 else
3379 lv = l2, hv = h2;
3380 break;
3382 case LSHIFTRT: case ASHIFTRT:
3383 case ASHIFT:
3384 case ROTATE: case ROTATERT:
3385 if (SHIFT_COUNT_TRUNCATED)
3386 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3388 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3389 return 0;
3391 if (code == LSHIFTRT || code == ASHIFTRT)
3392 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3393 code == ASHIFTRT);
3394 else if (code == ASHIFT)
3395 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3396 else if (code == ROTATE)
3397 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3398 else /* code == ROTATERT */
3399 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3400 break;
3402 default:
3403 return 0;
3406 return immed_double_const (lv, hv, mode);
3409 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3410 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3412 /* Get the integer argument values in two forms:
3413 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3415 arg0 = INTVAL (op0);
3416 arg1 = INTVAL (op1);
3418 if (width < HOST_BITS_PER_WIDE_INT)
3420 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3421 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3423 arg0s = arg0;
3424 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3425 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3427 arg1s = arg1;
3428 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3429 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3431 else
3433 arg0s = arg0;
3434 arg1s = arg1;
3437 /* Compute the value of the arithmetic. */
3439 switch (code)
3441 case PLUS:
3442 val = arg0s + arg1s;
3443 break;
3445 case MINUS:
3446 val = arg0s - arg1s;
3447 break;
3449 case MULT:
3450 val = arg0s * arg1s;
3451 break;
3453 case DIV:
3454 if (arg1s == 0
3455 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3456 && arg1s == -1))
3457 return 0;
3458 val = arg0s / arg1s;
3459 break;
3461 case MOD:
3462 if (arg1s == 0
3463 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3464 && arg1s == -1))
3465 return 0;
3466 val = arg0s % arg1s;
3467 break;
3469 case UDIV:
3470 if (arg1 == 0
3471 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3472 && arg1s == -1))
3473 return 0;
3474 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3475 break;
3477 case UMOD:
3478 if (arg1 == 0
3479 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3480 && arg1s == -1))
3481 return 0;
3482 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3483 break;
3485 case AND:
3486 val = arg0 & arg1;
3487 break;
3489 case IOR:
3490 val = arg0 | arg1;
3491 break;
3493 case XOR:
3494 val = arg0 ^ arg1;
3495 break;
3497 case LSHIFTRT:
3498 case ASHIFT:
3499 case ASHIFTRT:
3500 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3501 the value is in range. We can't return any old value for
3502 out-of-range arguments because either the middle-end (via
3503 shift_truncation_mask) or the back-end might be relying on
3504 target-specific knowledge. Nor can we rely on
3505 shift_truncation_mask, since the shift might not be part of an
3506 ashlM3, lshrM3 or ashrM3 instruction. */
3507 if (SHIFT_COUNT_TRUNCATED)
3508 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3509 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3510 return 0;
3512 val = (code == ASHIFT
3513 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3514 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3516 /* Sign-extend the result for arithmetic right shifts. */
3517 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3518 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3519 break;
3521 case ROTATERT:
3522 if (arg1 < 0)
3523 return 0;
3525 arg1 %= width;
3526 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3527 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3528 break;
3530 case ROTATE:
3531 if (arg1 < 0)
3532 return 0;
3534 arg1 %= width;
3535 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3536 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3537 break;
3539 case COMPARE:
3540 /* Do nothing here. */
3541 return 0;
3543 case SMIN:
3544 val = arg0s <= arg1s ? arg0s : arg1s;
3545 break;
3547 case UMIN:
3548 val = ((unsigned HOST_WIDE_INT) arg0
3549 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3550 break;
3552 case SMAX:
3553 val = arg0s > arg1s ? arg0s : arg1s;
3554 break;
3556 case UMAX:
3557 val = ((unsigned HOST_WIDE_INT) arg0
3558 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3559 break;
3561 case SS_PLUS:
3562 case US_PLUS:
3563 case SS_MINUS:
3564 case US_MINUS:
3565 case SS_MULT:
3566 case US_MULT:
3567 case SS_DIV:
3568 case US_DIV:
3569 case SS_ASHIFT:
3570 case US_ASHIFT:
3571 /* ??? There are simplifications that can be done. */
3572 return 0;
3574 default:
3575 gcc_unreachable ();
3578 return gen_int_mode (val, mode);
3581 return NULL_RTX;
3586 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3587 PLUS or MINUS.
3589 Rather than testing for specific cases, we do this by a brute-force method
3590 and do all possible simplifications until no more changes occur. Then
3591 we rebuild the operation. */
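/* For example, (minus (plus A B) (plus B C)) is expanded into the
operand list {A, B, -B, -C}; the B and -B terms cancel against
each other, and the remaining operands are rebuilt as (minus A C).  */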
3593 struct simplify_plus_minus_op_data
3595 rtx op;
3596 short neg;
3599 static bool
3600 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3602 int result;
3604 result = (commutative_operand_precedence (y)
3605 - commutative_operand_precedence (x));
3606 if (result)
3607 return result > 0;
3609 /* Group together equal REGs to do more simplification. */
3610 if (REG_P (x) && REG_P (y))
3611 return REGNO (x) > REGNO (y);
3612 else
3613 return false;
3616 static rtx
3617 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3618 rtx op1)
3620 struct simplify_plus_minus_op_data ops[8];
3621 rtx result, tem;
3622 int n_ops = 2, input_ops = 2;
3623 int changed, n_constants = 0, canonicalized = 0;
3624 int i, j;
3626 memset (ops, 0, sizeof ops);
3628 /* Set up the two operands and then expand them until nothing has been
3629 changed. If we run out of room in our array, give up; this should
3630 almost never happen. */
3632 ops[0].op = op0;
3633 ops[0].neg = 0;
3634 ops[1].op = op1;
3635 ops[1].neg = (code == MINUS);
3639 changed = 0;
3641 for (i = 0; i < n_ops; i++)
3643 rtx this_op = ops[i].op;
3644 int this_neg = ops[i].neg;
3645 enum rtx_code this_code = GET_CODE (this_op);
3647 switch (this_code)
3649 case PLUS:
3650 case MINUS:
3651 if (n_ops == 7)
3652 return NULL_RTX;
3654 ops[n_ops].op = XEXP (this_op, 1);
3655 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3656 n_ops++;
3658 ops[i].op = XEXP (this_op, 0);
3659 input_ops++;
3660 changed = 1;
3661 canonicalized |= this_neg;
3662 break;
3664 case NEG:
3665 ops[i].op = XEXP (this_op, 0);
3666 ops[i].neg = ! this_neg;
3667 changed = 1;
3668 canonicalized = 1;
3669 break;
3671 case CONST:
3672 if (n_ops < 7
3673 && GET_CODE (XEXP (this_op, 0)) == PLUS
3674 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3675 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3677 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3678 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3679 ops[n_ops].neg = this_neg;
3680 n_ops++;
3681 changed = 1;
3682 canonicalized = 1;
3684 break;
3686 case NOT:
3687 /* ~a -> (-a - 1) */
3688 if (n_ops != 7)
3690 ops[n_ops].op = constm1_rtx;
3691 ops[n_ops++].neg = this_neg;
3692 ops[i].op = XEXP (this_op, 0);
3693 ops[i].neg = !this_neg;
3694 changed = 1;
3695 canonicalized = 1;
3697 break;
3699 case CONST_INT:
3700 n_constants++;
3701 if (this_neg)
3703 ops[i].op = neg_const_int (mode, this_op);
3704 ops[i].neg = 0;
3705 changed = 1;
3706 canonicalized = 1;
3708 break;
3710 default:
3711 break;
3715 while (changed);
3717 if (n_constants > 1)
3718 canonicalized = 1;
3720 gcc_assert (n_ops >= 2);
3722 /* If we only have two operands, we can avoid the loops. */
3723 if (n_ops == 2)
3725 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3726 rtx lhs, rhs;
3728 /* Get the two operands. Be careful with the order, especially for
3729 the cases where code == MINUS. */
3730 if (ops[0].neg && ops[1].neg)
3732 lhs = gen_rtx_NEG (mode, ops[0].op);
3733 rhs = ops[1].op;
3735 else if (ops[0].neg)
3737 lhs = ops[1].op;
3738 rhs = ops[0].op;
3740 else
3742 lhs = ops[0].op;
3743 rhs = ops[1].op;
3746 return simplify_const_binary_operation (code, mode, lhs, rhs);
3749 /* Now simplify each pair of operands until nothing changes. */
3752 /* Insertion sort is good enough for an eight-element array. */
3753 for (i = 1; i < n_ops; i++)
3755 struct simplify_plus_minus_op_data save;
3756 j = i - 1;
3757 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3758 continue;
3760 canonicalized = 1;
3761 save = ops[i];
3763 ops[j + 1] = ops[j];
3764 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3765 ops[j + 1] = save;
3768 changed = 0;
3769 for (i = n_ops - 1; i > 0; i--)
3770 for (j = i - 1; j >= 0; j--)
3772 rtx lhs = ops[j].op, rhs = ops[i].op;
3773 int lneg = ops[j].neg, rneg = ops[i].neg;
3775 if (lhs != 0 && rhs != 0)
3777 enum rtx_code ncode = PLUS;
3779 if (lneg != rneg)
3781 ncode = MINUS;
3782 if (lneg)
3783 tem = lhs, lhs = rhs, rhs = tem;
3785 else if (swap_commutative_operands_p (lhs, rhs))
3786 tem = lhs, lhs = rhs, rhs = tem;
3788 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3789 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3791 rtx tem_lhs, tem_rhs;
3793 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3794 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3795 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3797 if (tem && !CONSTANT_P (tem))
3798 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3800 else
3801 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3803 /* Reject "simplifications" that just wrap the two
3804 arguments in a CONST. Failure to do so can result
3805 in infinite recursion with simplify_binary_operation
3806 when it calls us to simplify CONST operations. */
3807 if (tem
3808 && ! (GET_CODE (tem) == CONST
3809 && GET_CODE (XEXP (tem, 0)) == ncode
3810 && XEXP (XEXP (tem, 0), 0) == lhs
3811 && XEXP (XEXP (tem, 0), 1) == rhs))
3813 lneg &= rneg;
3814 if (GET_CODE (tem) == NEG)
3815 tem = XEXP (tem, 0), lneg = !lneg;
3816 if (CONST_INT_P (tem) && lneg)
3817 tem = neg_const_int (mode, tem), lneg = 0;
3819 ops[i].op = tem;
3820 ops[i].neg = lneg;
3821 ops[j].op = NULL_RTX;
3822 changed = 1;
3823 canonicalized = 1;
3828 /* If nothing changed, fail. */
3829 if (!canonicalized)
3830 return NULL_RTX;
3832 /* Pack all the operands to the lower-numbered entries. */
3833 for (i = 0, j = 0; j < n_ops; j++)
3834 if (ops[j].op)
3836 ops[i] = ops[j];
3837 i++;
3839 n_ops = i;
3841 while (changed);
3843 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3844 if (n_ops == 2
3845 && CONST_INT_P (ops[1].op)
3846 && CONSTANT_P (ops[0].op)
3847 && ops[0].neg)
3848 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3850 /* We suppressed creation of trivial CONST expressions in the
3851 combination loop to avoid recursion. Create one manually now.
3852 The combination loop should have ensured that there is exactly
3853 one CONST_INT, and the sort will have ensured that it is last
3854 in the array and that any other constant will be next-to-last. */
3856 if (n_ops > 1
3857 && CONST_INT_P (ops[n_ops - 1].op)
3858 && CONSTANT_P (ops[n_ops - 2].op))
3860 rtx value = ops[n_ops - 1].op;
3861 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3862 value = neg_const_int (mode, value);
3863 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3864 n_ops--;
3867 /* Put a non-negated operand first, if possible. */
3869 for (i = 0; i < n_ops && ops[i].neg; i++)
3870 continue;
3871 if (i == n_ops)
3872 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3873 else if (i != 0)
3875 tem = ops[0].op;
3876 ops[0] = ops[i];
3877 ops[i].op = tem;
3878 ops[i].neg = 1;
3881 /* Now make the result by performing the requested operations. */
3882 result = ops[0].op;
3883 for (i = 1; i < n_ops; i++)
3884 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3885 mode, result, ops[i].op);
3887 return result;
3890 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3891 static bool
3892 plus_minus_operand_p (const_rtx x)
3894 return GET_CODE (x) == PLUS
3895 || GET_CODE (x) == MINUS
3896 || (GET_CODE (x) == CONST
3897 && GET_CODE (XEXP (x, 0)) == PLUS
3898 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3899 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3902 /* Like simplify_binary_operation except used for relational operators.
3903 MODE is the mode of the result. If MODE is VOIDmode, the two operands
3904 must not both be VOIDmode.
3906 CMP_MODE specifies the mode in which the comparison is done, so it is
3907 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3908 the operands or, if both are VOIDmode, the operands are compared in
3909 "infinite precision". */
3910 rtx
3911 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3912 enum machine_mode cmp_mode, rtx op0, rtx op1)
3914 rtx tem, trueop0, trueop1;
3916 if (cmp_mode == VOIDmode)
3917 cmp_mode = GET_MODE (op0);
3918 if (cmp_mode == VOIDmode)
3919 cmp_mode = GET_MODE (op1);
3921 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3922 if (tem)
3924 if (SCALAR_FLOAT_MODE_P (mode))
3926 if (tem == const0_rtx)
3927 return CONST0_RTX (mode);
3928 #ifdef FLOAT_STORE_FLAG_VALUE
3930 REAL_VALUE_TYPE val;
3931 val = FLOAT_STORE_FLAG_VALUE (mode);
3932 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3934 #else
3935 return NULL_RTX;
3936 #endif
3938 if (VECTOR_MODE_P (mode))
3940 if (tem == const0_rtx)
3941 return CONST0_RTX (mode);
3942 #ifdef VECTOR_STORE_FLAG_VALUE
3944 int i, units;
3945 rtvec v;
3947 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3948 if (val == NULL_RTX)
3949 return NULL_RTX;
3950 if (val == const1_rtx)
3951 return CONST1_RTX (mode);
3953 units = GET_MODE_NUNITS (mode);
3954 v = rtvec_alloc (units);
3955 for (i = 0; i < units; i++)
3956 RTVEC_ELT (v, i) = val;
3957 return gen_rtx_raw_CONST_VECTOR (mode, v);
3959 #else
3960 return NULL_RTX;
3961 #endif
3964 return tem;
3967 /* For the following tests, ensure const0_rtx is op1. */
3968 if (swap_commutative_operands_p (op0, op1)
3969 || (op0 == const0_rtx && op1 != const0_rtx))
3970 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3972 /* If op0 is a compare, extract the comparison arguments from it. */
3973 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3974 return simplify_gen_relational (code, mode, VOIDmode,
3975 XEXP (op0, 0), XEXP (op0, 1));
3977 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3978 || CC0_P (op0))
3979 return NULL_RTX;
3981 trueop0 = avoid_constant_pool_reference (op0);
3982 trueop1 = avoid_constant_pool_reference (op1);
3983 return simplify_relational_operation_1 (code, mode, cmp_mode,
3984 trueop0, trueop1);
3987 /* This part of simplify_relational_operation is only used when CMP_MODE
3988 is not in class MODE_CC (i.e. it is a real comparison).
3990 MODE is the mode of the result, while CMP_MODE specifies the mode
3991 in which the comparison is done, so it is the mode of the operands. */
3993 static rtx
3994 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3995 enum machine_mode cmp_mode, rtx op0, rtx op1)
3997 enum rtx_code op0code = GET_CODE (op0);
3999 if (op1 == const0_rtx && COMPARISON_P (op0))
4001 /* If op0 is a comparison, extract the comparison arguments
4002 from it. */
4003 if (code == NE)
4005 if (GET_MODE (op0) == mode)
4006 return simplify_rtx (op0);
4007 else
4008 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4009 XEXP (op0, 0), XEXP (op0, 1));
4011 else if (code == EQ)
4013 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4014 if (new_code != UNKNOWN)
4015 return simplify_gen_relational (new_code, mode, VOIDmode,
4016 XEXP (op0, 0), XEXP (op0, 1));
4020 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4021 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4022 if ((code == LTU || code == GEU)
4023 && GET_CODE (op0) == PLUS
4024 && CONST_INT_P (XEXP (op0, 1))
4025 && (rtx_equal_p (op1, XEXP (op0, 0))
4026 || rtx_equal_p (op1, XEXP (op0, 1))))
4028 rtx new_cmp
4029 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4030 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4031 cmp_mode, XEXP (op0, 0), new_cmp);
4034 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4035 if ((code == LTU || code == GEU)
4036 && GET_CODE (op0) == PLUS
4037 && rtx_equal_p (op1, XEXP (op0, 1))
4038 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4039 && !rtx_equal_p (op1, XEXP (op0, 0)))
4040 return simplify_gen_relational (code, mode, cmp_mode, op0,
4041 copy_rtx (XEXP (op0, 0)));
4043 if (op1 == const0_rtx)
4045 /* Canonicalize (GTU x 0) as (NE x 0). */
4046 if (code == GTU)
4047 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4048 /* Canonicalize (LEU x 0) as (EQ x 0). */
4049 if (code == LEU)
4050 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4052 else if (op1 == const1_rtx)
4054 switch (code)
4056 case GE:
4057 /* Canonicalize (GE x 1) as (GT x 0). */
4058 return simplify_gen_relational (GT, mode, cmp_mode,
4059 op0, const0_rtx);
4060 case GEU:
4061 /* Canonicalize (GEU x 1) as (NE x 0). */
4062 return simplify_gen_relational (NE, mode, cmp_mode,
4063 op0, const0_rtx);
4064 case LT:
4065 /* Canonicalize (LT x 1) as (LE x 0). */
4066 return simplify_gen_relational (LE, mode, cmp_mode,
4067 op0, const0_rtx);
4068 case LTU:
4069 /* Canonicalize (LTU x 1) as (EQ x 0). */
4070 return simplify_gen_relational (EQ, mode, cmp_mode,
4071 op0, const0_rtx);
4072 default:
4073 break;
4076 else if (op1 == constm1_rtx)
4078 /* Canonicalize (LE x -1) as (LT x 0). */
4079 if (code == LE)
4080 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4081 /* Canonicalize (GT x -1) as (GE x 0). */
4082 if (code == GT)
4083 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4086 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4087 if ((code == EQ || code == NE)
4088 && (op0code == PLUS || op0code == MINUS)
4089 && CONSTANT_P (op1)
4090 && CONSTANT_P (XEXP (op0, 1))
4091 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4093 rtx x = XEXP (op0, 0);
4094 rtx c = XEXP (op0, 1);
4096 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4097 cmp_mode, op1, c);
4098 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4101 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4102 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4103 if (code == NE
4104 && op1 == const0_rtx
4105 && GET_MODE_CLASS (mode) == MODE_INT
4106 && cmp_mode != VOIDmode
4107 /* ??? Work-around BImode bugs in the ia64 backend. */
4108 && mode != BImode
4109 && cmp_mode != BImode
4110 && nonzero_bits (op0, cmp_mode) == 1
4111 && STORE_FLAG_VALUE == 1)
4112 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4113 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4114 : lowpart_subreg (mode, op0, cmp_mode);
4116 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4117 if ((code == EQ || code == NE)
4118 && op1 == const0_rtx
4119 && op0code == XOR)
4120 return simplify_gen_relational (code, mode, cmp_mode,
4121 XEXP (op0, 0), XEXP (op0, 1));
4123 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4124 if ((code == EQ || code == NE)
4125 && op0code == XOR
4126 && rtx_equal_p (XEXP (op0, 0), op1)
4127 && !side_effects_p (XEXP (op0, 0)))
4128 return simplify_gen_relational (code, mode, cmp_mode,
4129 XEXP (op0, 1), const0_rtx);
4131 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4132 if ((code == EQ || code == NE)
4133 && op0code == XOR
4134 && rtx_equal_p (XEXP (op0, 1), op1)
4135 && !side_effects_p (XEXP (op0, 1)))
4136 return simplify_gen_relational (code, mode, cmp_mode,
4137 XEXP (op0, 0), const0_rtx);
4139 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4140 if ((code == EQ || code == NE)
4141 && op0code == XOR
4142 && (CONST_INT_P (op1)
4143 || GET_CODE (op1) == CONST_DOUBLE)
4144 && (CONST_INT_P (XEXP (op0, 1))
4145 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4146 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4147 simplify_gen_binary (XOR, cmp_mode,
4148 XEXP (op0, 1), op1));
4150 if (op0code == POPCOUNT && op1 == const0_rtx)
4151 switch (code)
4153 case EQ:
4154 case LE:
4155 case LEU:
4156 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4157 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4158 XEXP (op0, 0), const0_rtx);
4160 case NE:
4161 case GT:
4162 case GTU:
4163 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4164 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4165 XEXP (op0, 0), const0_rtx);
4167 default:
4168 break;
4171 return NULL_RTX;
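/* Bit flags describing the known outcome of a comparison; comparison_result below maps them to a CONST_INT for a requested comparison code. */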
4174 enum
4176 CMP_EQ = 1,
4177 CMP_LT = 2,
4178 CMP_GT = 4,
4179 CMP_LTU = 8,
4180 CMP_GTU = 16
4184 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4185 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4186 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4187 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4188 For floating-point comparisons, assume that the operands were ordered. */
4190 static rtx
4191 comparison_result (enum rtx_code code, int known_results)
4193 switch (code)
4195 case EQ:
4196 case UNEQ:
4197 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4198 case NE:
4199 case LTGT:
4200 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4202 case LT:
4203 case UNLT:
4204 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4205 case GE:
4206 case UNGE:
4207 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4209 case GT:
4210 case UNGT:
4211 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4212 case LE:
4213 case UNLE:
4214 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4216 case LTU:
4217 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4218 case GEU:
4219 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4221 case GTU:
4222 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4223 case LEU:
4224 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4226 case ORDERED:
4227 return const_true_rtx;
4228 case UNORDERED:
4229 return const0_rtx;
4230 default:
4231 gcc_unreachable ();
4235 /* Check if the given comparison (done in the given MODE) is actually a
4236 tautology or a contradiction.
4237 If no simplification is possible, this function returns zero.
4238 Otherwise, it returns either const_true_rtx or const0_rtx. */
4241 simplify_const_relational_operation (enum rtx_code code,
4242 enum machine_mode mode,
4243 rtx op0, rtx op1)
4245 rtx tem;
4246 rtx trueop0;
4247 rtx trueop1;
4249 gcc_assert (mode != VOIDmode
4250 || (GET_MODE (op0) == VOIDmode
4251 && GET_MODE (op1) == VOIDmode));
4253 /* If op0 is a compare, extract the comparison arguments from it. */
4254 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4256 op1 = XEXP (op0, 1);
4257 op0 = XEXP (op0, 0);
4259 if (GET_MODE (op0) != VOIDmode)
4260 mode = GET_MODE (op0);
4261 else if (GET_MODE (op1) != VOIDmode)
4262 mode = GET_MODE (op1);
4263 else
4264 return 0;
4267 /* We can't simplify MODE_CC values since we don't know what the
4268 actual comparison is. */
4269 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4270 return 0;
4272 /* Make sure the constant is second. */
4273 if (swap_commutative_operands_p (op0, op1))
4275 tem = op0, op0 = op1, op1 = tem;
4276 code = swap_condition (code);
4279 trueop0 = avoid_constant_pool_reference (op0);
4280 trueop1 = avoid_constant_pool_reference (op1);
4282 /* For integer comparisons of A and B, we may be able to simplify A - B
4283 and then simplify a comparison of that with zero. If A and B are both
4284 either a register or a CONST_INT, this can't help; testing for these
4285 cases will prevent infinite recursion here and speed things up.
4287 We can only do this for EQ and NE comparisons, as otherwise we may
4288 lose or introduce overflow, which we cannot disregard as undefined
4289 because we do not know the signedness of the operation on either the
4290 left or the right-hand side of the comparison. */
4292 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4293 && (code == EQ || code == NE)
4294 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4295 && (REG_P (op1) || CONST_INT_P (trueop1)))
4296 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4297 /* We cannot do this if tem is a nonzero address. */
4298 && ! nonzero_address_p (tem))
4299 return simplify_const_relational_operation (signed_condition (code),
4300 mode, tem, const0_rtx);
4302 if (! HONOR_NANS (mode) && code == ORDERED)
4303 return const_true_rtx;
4305 if (! HONOR_NANS (mode) && code == UNORDERED)
4306 return const0_rtx;
4308 /* For modes without NaNs, if the two operands are equal, we know the
4309 result except if they have side-effects. Even with NaNs we know
4310 the result of unordered comparisons and, if signaling NaNs are
4311 irrelevant, also the result of LT/GT/LTGT. */
4312 if ((! HONOR_NANS (GET_MODE (trueop0))
4313 || code == UNEQ || code == UNLE || code == UNGE
4314 || ((code == LT || code == GT || code == LTGT)
4315 && ! HONOR_SNANS (GET_MODE (trueop0))))
4316 && rtx_equal_p (trueop0, trueop1)
4317 && ! side_effects_p (trueop0))
4318 return comparison_result (code, CMP_EQ);
4320 /* If the operands are floating-point constants, see if we can fold
4321 the result. */
4322 if (GET_CODE (trueop0) == CONST_DOUBLE
4323 && GET_CODE (trueop1) == CONST_DOUBLE
4324 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4326 REAL_VALUE_TYPE d0, d1;
4328 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4329 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4331 /* Comparisons are unordered iff at least one of the values is NaN. */
4332 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4333 switch (code)
4335 case UNEQ:
4336 case UNLT:
4337 case UNGT:
4338 case UNLE:
4339 case UNGE:
4340 case NE:
4341 case UNORDERED:
4342 return const_true_rtx;
4343 case EQ:
4344 case LT:
4345 case GT:
4346 case LE:
4347 case GE:
4348 case LTGT:
4349 case ORDERED:
4350 return const0_rtx;
4351 default:
4352 return 0;
4355 return comparison_result (code,
4356 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4357 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4360 /* Otherwise, see if the operands are both integers. */
4361 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4362 && (GET_CODE (trueop0) == CONST_DOUBLE
4363 || CONST_INT_P (trueop0))
4364 && (GET_CODE (trueop1) == CONST_DOUBLE
4365 || CONST_INT_P (trueop1)))
4367 int width = GET_MODE_BITSIZE (mode);
4368 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4369 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4371 /* Get the two words comprising each integer constant. */
4372 if (GET_CODE (trueop0) == CONST_DOUBLE)
4374 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4375 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4377 else
4379 l0u = l0s = INTVAL (trueop0);
4380 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4383 if (GET_CODE (trueop1) == CONST_DOUBLE)
4385 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4386 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4388 else
4390 l1u = l1s = INTVAL (trueop1);
4391 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4394 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4395 we have to sign or zero-extend the values. */
4396 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4398 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4399 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4401 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4402 l0s |= ((HOST_WIDE_INT) (-1) << width);
4404 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4405 l1s |= ((HOST_WIDE_INT) (-1) << width);
4407 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4408 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4410 if (h0u == h1u && l0u == l1u)
4411 return comparison_result (code, CMP_EQ);
4412 else
4414 int cr;
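/* Work out both the signed and the unsigned ordering of the two constants; comparison_result picks whichever bit CODE asks about. */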
4415 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4416 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4417 return comparison_result (code, cr);
4421 /* Optimize comparisons with upper and lower bounds. */
4422 if (SCALAR_INT_MODE_P (mode)
4423 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4424 && CONST_INT_P (trueop1))
4426 int sign;
4427 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4428 HOST_WIDE_INT val = INTVAL (trueop1);
4429 HOST_WIDE_INT mmin, mmax;
4431 if (code == GEU
4432 || code == LEU
4433 || code == GTU
4434 || code == LTU)
4435 sign = 0;
4436 else
4437 sign = 1;
4439 /* Get a reduced range if the sign bit is zero. */
4440 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4442 mmin = 0;
4443 mmax = nonzero;
4445 else
4447 rtx mmin_rtx, mmax_rtx;
4448 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4450 mmin = INTVAL (mmin_rtx);
4451 mmax = INTVAL (mmax_rtx);
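/* If TRUEOP0 is known to have several duplicated sign bits, its value fits in a proportionally narrower range, so tighten MMIN and MMAX. */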
4452 if (sign)
4454 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4456 mmin >>= (sign_copies - 1);
4457 mmax >>= (sign_copies - 1);
4461 switch (code)
4463 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4464 case GEU:
4465 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4466 return const_true_rtx;
4467 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4468 return const0_rtx;
4469 break;
4470 case GE:
4471 if (val <= mmin)
4472 return const_true_rtx;
4473 if (val > mmax)
4474 return const0_rtx;
4475 break;
4477 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4478 case LEU:
4479 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4480 return const_true_rtx;
4481 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4482 return const0_rtx;
4483 break;
4484 case LE:
4485 if (val >= mmax)
4486 return const_true_rtx;
4487 if (val < mmin)
4488 return const0_rtx;
4489 break;
4491 case EQ:
4492 /* x == y is always false for y out of range. */
4493 if (val < mmin || val > mmax)
4494 return const0_rtx;
4495 break;
4497 /* x > y is always false for y >= mmax, always true for y < mmin. */
4498 case GTU:
4499 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4500 return const0_rtx;
4501 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4502 return const_true_rtx;
4503 break;
4504 case GT:
4505 if (val >= mmax)
4506 return const0_rtx;
4507 if (val < mmin)
4508 return const_true_rtx;
4509 break;
4511 /* x < y is always false for y <= mmin, always true for y > mmax. */
4512 case LTU:
4513 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4514 return const0_rtx;
4515 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4516 return const_true_rtx;
4517 break;
4518 case LT:
4519 if (val <= mmin)
4520 return const0_rtx;
4521 if (val > mmax)
4522 return const_true_rtx;
4523 break;
4525 case NE:
4526 /* x != y is always true for y out of range. */
4527 if (val < mmin || val > mmax)
4528 return const_true_rtx;
4529 break;
4531 default:
4532 break;
4536 /* Optimize integer comparisons with zero. */
4537 if (trueop1 == const0_rtx)
4539 /* Some addresses are known to be nonzero. We don't know
4540 their sign, but equality comparisons are known. */
4541 if (nonzero_address_p (trueop0))
4543 if (code == EQ || code == LEU)
4544 return const0_rtx;
4545 if (code == NE || code == GTU)
4546 return const_true_rtx;
4549 /* See if the first operand is an IOR with a constant. If so, we
4550 may be able to determine the result of this comparison. */
4551 if (GET_CODE (op0) == IOR)
4553 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4554 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4556 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
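/* HAS_SIGN is nonzero when the IOR'ed constant already forces the sign bit of OP0 to be set. */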
4557 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4558 && (INTVAL (inner_const)
4559 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4561 switch (code)
4563 case EQ:
4564 case LEU:
4565 return const0_rtx;
4566 case NE:
4567 case GTU:
4568 return const_true_rtx;
4569 case LT:
4570 case LE:
4571 if (has_sign)
4572 return const_true_rtx;
4573 break;
4574 case GT:
4575 case GE:
4576 if (has_sign)
4577 return const0_rtx;
4578 break;
4579 default:
4580 break;
4586 /* Optimize comparison of ABS with zero. */
4587 if (trueop1 == CONST0_RTX (mode)
4588 && (GET_CODE (trueop0) == ABS
4589 || (GET_CODE (trueop0) == FLOAT_EXTEND
4590 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4592 switch (code)
4594 case LT:
4595 /* Optimize abs(x) < 0.0. */
4596 if (!HONOR_SNANS (mode)
4597 && (!INTEGRAL_MODE_P (mode)
4598 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4600 if (INTEGRAL_MODE_P (mode)
4601 && (issue_strict_overflow_warning
4602 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4603 warning (OPT_Wstrict_overflow,
4604 ("assuming signed overflow does not occur when "
4605 "assuming abs (x) < 0 is false"));
4606 return const0_rtx;
4608 break;
4610 case GE:
4611 /* Optimize abs(x) >= 0.0. */
4612 if (!HONOR_NANS (mode)
4613 && (!INTEGRAL_MODE_P (mode)
4614 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4616 if (INTEGRAL_MODE_P (mode)
4617 && (issue_strict_overflow_warning
4618 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4619 warning (OPT_Wstrict_overflow,
4620 ("assuming signed overflow does not occur when "
4621 "assuming abs (x) >= 0 is true"));
4622 return const_true_rtx;
4624 break;
4626 case UNGE:
4627 /* Optimize ! (abs(x) < 0.0). */
4628 return const_true_rtx;
4630 default:
4631 break;
4635 return 0;
4638 /* Simplify CODE, an operation with result mode MODE and three operands,
4639 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4640 a constant. Return 0 if no simplification is possible. */
4643 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4644 enum machine_mode op0_mode, rtx op0, rtx op1,
4645 rtx op2)
4647 unsigned int width = GET_MODE_BITSIZE (mode);
4649 /* VOIDmode means "infinite" precision. */
4650 if (width == 0)
4651 width = HOST_BITS_PER_WIDE_INT;
4653 switch (code)
4655 case SIGN_EXTRACT:
4656 case ZERO_EXTRACT:
4657 if (CONST_INT_P (op0)
4658 && CONST_INT_P (op1)
4659 && CONST_INT_P (op2)
4660 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4661 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4663 /* Extracting a bit-field from a constant. */
4664 HOST_WIDE_INT val = INTVAL (op0);
4666 if (BITS_BIG_ENDIAN)
4667 val >>= (GET_MODE_BITSIZE (op0_mode)
4668 - INTVAL (op2) - INTVAL (op1));
4669 else
4670 val >>= INTVAL (op2);
4672 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4674 /* First zero-extend. */
4675 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4676 /* If desired, propagate sign bit. */
4677 if (code == SIGN_EXTRACT
4678 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4679 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4682 /* Clear the bits that don't belong in our mode,
4683 unless they and our sign bit are all one.
4684 So we get either a reasonable negative value or a reasonable
4685 unsigned value for this mode. */
4686 if (width < HOST_BITS_PER_WIDE_INT
4687 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4688 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4689 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4691 return gen_int_mode (val, mode);
4693 break;
4695 case IF_THEN_ELSE:
4696 if (CONST_INT_P (op0))
4697 return op0 != const0_rtx ? op1 : op2;
4699 /* Convert c ? a : a into "a". */
4700 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4701 return op1;
4703 /* Convert a != b ? a : b into "a". */
4704 if (GET_CODE (op0) == NE
4705 && ! side_effects_p (op0)
4706 && ! HONOR_NANS (mode)
4707 && ! HONOR_SIGNED_ZEROS (mode)
4708 && ((rtx_equal_p (XEXP (op0, 0), op1)
4709 && rtx_equal_p (XEXP (op0, 1), op2))
4710 || (rtx_equal_p (XEXP (op0, 0), op2)
4711 && rtx_equal_p (XEXP (op0, 1), op1))))
4712 return op1;
4714 /* Convert a == b ? a : b into "b". */
4715 if (GET_CODE (op0) == EQ
4716 && ! side_effects_p (op0)
4717 && ! HONOR_NANS (mode)
4718 && ! HONOR_SIGNED_ZEROS (mode)
4719 && ((rtx_equal_p (XEXP (op0, 0), op1)
4720 && rtx_equal_p (XEXP (op0, 1), op2))
4721 || (rtx_equal_p (XEXP (op0, 0), op2)
4722 && rtx_equal_p (XEXP (op0, 1), op1))))
4723 return op2;
4725 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4727 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4728 ? GET_MODE (XEXP (op0, 1))
4729 : GET_MODE (XEXP (op0, 0)));
4730 rtx temp;
4732 /* Look for constants in op1 and op2 that let us reduce the IF_THEN_ELSE to the comparison itself or its reverse. */
4733 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4735 HOST_WIDE_INT t = INTVAL (op1);
4736 HOST_WIDE_INT f = INTVAL (op2);
4738 if (t == STORE_FLAG_VALUE && f == 0)
4739 code = GET_CODE (op0);
4740 else if (t == 0 && f == STORE_FLAG_VALUE)
4742 enum rtx_code tmp;
4743 tmp = reversed_comparison_code (op0, NULL_RTX);
4744 if (tmp == UNKNOWN)
4745 break;
4746 code = tmp;
4748 else
4749 break;
4751 return simplify_gen_relational (code, mode, cmp_mode,
4752 XEXP (op0, 0), XEXP (op0, 1));
4755 if (cmp_mode == VOIDmode)
4756 cmp_mode = op0_mode;
4757 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4758 cmp_mode, XEXP (op0, 0),
4759 XEXP (op0, 1));
4761 /* See if any simplifications were possible. */
4762 if (temp)
4764 if (CONST_INT_P (temp))
4765 return temp == const0_rtx ? op2 : op1;
4766 else if (temp)
4767 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4770 break;
4772 case VEC_MERGE:
4773 gcc_assert (GET_MODE (op0) == mode);
4774 gcc_assert (GET_MODE (op1) == mode);
4775 gcc_assert (VECTOR_MODE_P (mode));
4776 op2 = avoid_constant_pool_reference (op2);
4777 if (CONST_INT_P (op2))
4779 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4780 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4781 int mask = (1 << n_elts) - 1;
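/* Bit I of OP2 selects element I from OP0 when set and from OP1 when clear. */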
4783 if (!(INTVAL (op2) & mask))
4784 return op1;
4785 if ((INTVAL (op2) & mask) == mask)
4786 return op0;
4788 op0 = avoid_constant_pool_reference (op0);
4789 op1 = avoid_constant_pool_reference (op1);
4790 if (GET_CODE (op0) == CONST_VECTOR
4791 && GET_CODE (op1) == CONST_VECTOR)
4793 rtvec v = rtvec_alloc (n_elts);
4794 unsigned int i;
4796 for (i = 0; i < n_elts; i++)
4797 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4798 ? CONST_VECTOR_ELT (op0, i)
4799 : CONST_VECTOR_ELT (op1, i));
4800 return gen_rtx_CONST_VECTOR (mode, v);
4803 break;
4805 default:
4806 gcc_unreachable ();
4809 return 0;
4812 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4813 or CONST_VECTOR,
4814 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4816 Works by unpacking OP into a collection of 8-bit values
4817 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4818 and then repacking them again for OUTERMODE. */
4820 static rtx
4821 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4822 enum machine_mode innermode, unsigned int byte)
4824 /* We support up to 512-bit values (for V8DFmode). */
4825 enum {
4826 max_bitsize = 512,
4827 value_bit = 8,
4828 value_mask = (1 << value_bit) - 1
4830 unsigned char value[max_bitsize / value_bit];
4831 int value_start;
4832 int i;
4833 int elem;
4835 int num_elem;
4836 rtx * elems;
4837 int elem_bitsize;
4838 rtx result_s;
4839 rtvec result_v = NULL;
4840 enum mode_class outer_class;
4841 enum machine_mode outer_submode;
4843 /* Some ports misuse CCmode. */
4844 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4845 return op;
4847 /* We have no way to represent a complex constant at the rtl level. */
4848 if (COMPLEX_MODE_P (outermode))
4849 return NULL_RTX;
4851 /* Unpack the value. */
4853 if (GET_CODE (op) == CONST_VECTOR)
4855 num_elem = CONST_VECTOR_NUNITS (op);
4856 elems = &CONST_VECTOR_ELT (op, 0);
4857 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4859 else
4861 num_elem = 1;
4862 elems = &op;
4863 elem_bitsize = max_bitsize;
4865 /* If this asserts, it is too complicated; reducing value_bit may help. */
4866 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4867 /* I don't know how to handle endianness of sub-units. */
4868 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4870 for (elem = 0; elem < num_elem; elem++)
4872 unsigned char * vp;
4873 rtx el = elems[elem];
4875 /* Vectors are kept in target memory order. (This is probably
4876 a mistake.) */
4878 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4879 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4880 / BITS_PER_UNIT);
4881 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4882 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4883 unsigned bytele = (subword_byte % UNITS_PER_WORD
4884 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4885 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4888 switch (GET_CODE (el))
4890 case CONST_INT:
4891 for (i = 0;
4892 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4893 i += value_bit)
4894 *vp++ = INTVAL (el) >> i;
4895 /* CONST_INTs are always logically sign-extended. */
4896 for (; i < elem_bitsize; i += value_bit)
4897 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4898 break;
4900 case CONST_DOUBLE:
4901 if (GET_MODE (el) == VOIDmode)
4903 /* If this triggers, someone should have generated a
4904 CONST_INT instead. */
4905 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4907 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4908 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4909 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4911 *vp++
4912 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4913 i += value_bit;
4915 /* It shouldn't matter what's done here, so fill it with
4916 zero. */
4917 for (; i < elem_bitsize; i += value_bit)
4918 *vp++ = 0;
4920 else
4922 long tmp[max_bitsize / 32];
4923 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4925 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4926 gcc_assert (bitsize <= elem_bitsize);
4927 gcc_assert (bitsize % value_bit == 0);
4929 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4930 GET_MODE (el));
4932 /* real_to_target produces its result in words affected by
4933 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4934 and use WORDS_BIG_ENDIAN instead; see the documentation
4935 of SUBREG in rtl.texi. */
4936 for (i = 0; i < bitsize; i += value_bit)
4938 int ibase;
4939 if (WORDS_BIG_ENDIAN)
4940 ibase = bitsize - 1 - i;
4941 else
4942 ibase = i;
4943 *vp++ = tmp[ibase / 32] >> i % 32;
4946 /* It shouldn't matter what's done here, so fill it with
4947 zero. */
4948 for (; i < elem_bitsize; i += value_bit)
4949 *vp++ = 0;
4951 break;
4953 case CONST_FIXED:
4954 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4956 for (i = 0; i < elem_bitsize; i += value_bit)
4957 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4959 else
4961 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4962 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4963 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4964 i += value_bit)
4965 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4966 >> (i - HOST_BITS_PER_WIDE_INT);
4967 for (; i < elem_bitsize; i += value_bit)
4968 *vp++ = 0;
4970 break;
4972 default:
4973 gcc_unreachable ();
4977 /* Now, pick the right byte to start with. */
4978 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4979 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4980 will already have offset 0. */
4981 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4983 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4984 - byte);
4985 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4986 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4987 byte = (subword_byte % UNITS_PER_WORD
4988 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4991 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4992 so if it's become negative it will instead be very large.) */
4993 gcc_assert (byte < GET_MODE_SIZE (innermode));
4995 /* Convert from bytes to chunks of size value_bit. */
4996 value_start = byte * (BITS_PER_UNIT / value_bit);
4998 /* Re-pack the value. */
5000 if (VECTOR_MODE_P (outermode))
5002 num_elem = GET_MODE_NUNITS (outermode);
5003 result_v = rtvec_alloc (num_elem);
5004 elems = &RTVEC_ELT (result_v, 0);
5005 outer_submode = GET_MODE_INNER (outermode);
5007 else
5009 num_elem = 1;
5010 elems = &result_s;
5011 outer_submode = outermode;
5014 outer_class = GET_MODE_CLASS (outer_submode);
5015 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5017 gcc_assert (elem_bitsize % value_bit == 0);
5018 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5020 for (elem = 0; elem < num_elem; elem++)
5022 unsigned char *vp;
5024 /* Vectors are stored in target memory order. (This is probably
5025 a mistake.) */
5027 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5028 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5029 / BITS_PER_UNIT);
5030 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5031 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5032 unsigned bytele = (subword_byte % UNITS_PER_WORD
5033 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5034 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5037 switch (outer_class)
5039 case MODE_INT:
5040 case MODE_PARTIAL_INT:
5042 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5044 for (i = 0;
5045 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5046 i += value_bit)
5047 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5048 for (; i < elem_bitsize; i += value_bit)
5049 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5050 << (i - HOST_BITS_PER_WIDE_INT));
5052 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5053 know why. */
5054 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5055 elems[elem] = gen_int_mode (lo, outer_submode);
5056 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5057 elems[elem] = immed_double_const (lo, hi, outer_submode);
5058 else
5059 return NULL_RTX;
5061 break;
5063 case MODE_FLOAT:
5064 case MODE_DECIMAL_FLOAT:
5066 REAL_VALUE_TYPE r;
5067 long tmp[max_bitsize / 32];
5069 /* real_from_target wants its input in words affected by
5070 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5071 and use WORDS_BIG_ENDIAN instead; see the documentation
5072 of SUBREG in rtl.texi. */
5073 for (i = 0; i < max_bitsize / 32; i++)
5074 tmp[i] = 0;
5075 for (i = 0; i < elem_bitsize; i += value_bit)
5077 int ibase;
5078 if (WORDS_BIG_ENDIAN)
5079 ibase = elem_bitsize - 1 - i;
5080 else
5081 ibase = i;
5082 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5085 real_from_target (&r, tmp, outer_submode);
5086 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5088 break;
5090 case MODE_FRACT:
5091 case MODE_UFRACT:
5092 case MODE_ACCUM:
5093 case MODE_UACCUM:
5095 FIXED_VALUE_TYPE f;
5096 f.data.low = 0;
5097 f.data.high = 0;
5098 f.mode = outer_submode;
5100 for (i = 0;
5101 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5102 i += value_bit)
5103 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5104 for (; i < elem_bitsize; i += value_bit)
5105 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5106 << (i - HOST_BITS_PER_WIDE_INT));
5108 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5110 break;
5112 default:
5113 gcc_unreachable ();
5116 if (VECTOR_MODE_P (outermode))
5117 return gen_rtx_CONST_VECTOR (outermode, result_v);
5118 else
5119 return result_s;
5122 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5123 Return 0 if no simplifications are possible. */
5125 simplify_subreg (enum machine_mode outermode, rtx op,
5126 enum machine_mode innermode, unsigned int byte)
5128 /* Little bit of sanity checking. */
5129 gcc_assert (innermode != VOIDmode);
5130 gcc_assert (outermode != VOIDmode);
5131 gcc_assert (innermode != BLKmode);
5132 gcc_assert (outermode != BLKmode);
5134 gcc_assert (GET_MODE (op) == innermode
5135 || GET_MODE (op) == VOIDmode);
5137 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5138 gcc_assert (byte < GET_MODE_SIZE (innermode));
5140 if (outermode == innermode && !byte)
5141 return op;
5143 if (CONST_INT_P (op)
5144 || GET_CODE (op) == CONST_DOUBLE
5145 || GET_CODE (op) == CONST_FIXED
5146 || GET_CODE (op) == CONST_VECTOR)
5147 return simplify_immed_subreg (outermode, op, innermode, byte);
5149 /* Changing mode twice with SUBREG => just change it once,
5150 or not at all if changing back to OP's starting mode. */
5151 if (GET_CODE (op) == SUBREG)
5153 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5154 int final_offset = byte + SUBREG_BYTE (op);
5155 rtx newx;
5157 if (outermode == innermostmode
5158 && byte == 0 && SUBREG_BYTE (op) == 0)
5159 return SUBREG_REG (op);
5161 /* The SUBREG_BYTE represents the offset, as if the value were stored
5162 in memory. The irritating exception is a paradoxical subreg, where
5163 we define SUBREG_BYTE to be 0; on big-endian machines this value
5164 would otherwise be negative. For a moment, undo this exception. */
5165 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5167 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5168 if (WORDS_BIG_ENDIAN)
5169 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5170 if (BYTES_BIG_ENDIAN)
5171 final_offset += difference % UNITS_PER_WORD;
5173 if (SUBREG_BYTE (op) == 0
5174 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5176 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5177 if (WORDS_BIG_ENDIAN)
5178 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5179 if (BYTES_BIG_ENDIAN)
5180 final_offset += difference % UNITS_PER_WORD;
5183 /* See whether resulting subreg will be paradoxical. */
5184 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5186 /* In nonparadoxical subregs we can't handle negative offsets. */
5187 if (final_offset < 0)
5188 return NULL_RTX;
5189 /* Bail out in case resulting subreg would be incorrect. */
5190 if (final_offset % GET_MODE_SIZE (outermode)
5191 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5192 return NULL_RTX;
5194 else
5196 int offset = 0;
5197 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5199 /* In a paradoxical subreg, see if we are still looking at the lower part.
5200 If so, our SUBREG_BYTE will be 0. */
5201 if (WORDS_BIG_ENDIAN)
5202 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5203 if (BYTES_BIG_ENDIAN)
5204 offset += difference % UNITS_PER_WORD;
5205 if (offset == final_offset)
5206 final_offset = 0;
5207 else
5208 return NULL_RTX;
5211 /* Recurse for further possible simplifications. */
5212 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5213 final_offset);
5214 if (newx)
5215 return newx;
5216 if (validate_subreg (outermode, innermostmode,
5217 SUBREG_REG (op), final_offset))
5219 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5220 if (SUBREG_PROMOTED_VAR_P (op)
5221 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5222 && GET_MODE_CLASS (outermode) == MODE_INT
5223 && IN_RANGE (GET_MODE_SIZE (outermode),
5224 GET_MODE_SIZE (innermode),
5225 GET_MODE_SIZE (innermostmode))
5226 && subreg_lowpart_p (newx))
5228 SUBREG_PROMOTED_VAR_P (newx) = 1;
5229 SUBREG_PROMOTED_UNSIGNED_SET
5230 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5232 return newx;
5234 return NULL_RTX;
5237 /* Merge implicit and explicit truncations. */
5239 if (GET_CODE (op) == TRUNCATE
5240 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5241 && subreg_lowpart_offset (outermode, innermode) == byte)
5242 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5243 GET_MODE (XEXP (op, 0)));
5245 /* SUBREG of a hard register => just change the register number
5246 and/or mode. If the hard register is not valid in that mode,
5247 suppress this simplification. If the hard register is the stack,
5248 frame, or argument pointer, leave this as a SUBREG. */
5250 if (REG_P (op) && HARD_REGISTER_P (op))
5252 unsigned int regno, final_regno;
5254 regno = REGNO (op);
5255 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5256 if (HARD_REGISTER_NUM_P (final_regno))
5258 rtx x;
5259 int final_offset = byte;
5261 /* Adjust offset for paradoxical subregs. */
5262 if (byte == 0
5263 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5265 int difference = (GET_MODE_SIZE (innermode)
5266 - GET_MODE_SIZE (outermode));
5267 if (WORDS_BIG_ENDIAN)
5268 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5269 if (BYTES_BIG_ENDIAN)
5270 final_offset += difference % UNITS_PER_WORD;
5273 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5275 /* Propagate the original regno. We don't have any way to specify
5276 the offset inside the original regno, so do so only for the lowpart.
5277 The information is used only by alias analysis, which cannot
5278 grok partial registers anyway. */
5280 if (subreg_lowpart_offset (outermode, innermode) == byte)
5281 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5282 return x;
5286 /* If we have a SUBREG of a register that we are replacing and we are
5287 replacing it with a MEM, make a new MEM and try replacing the
5288 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5289 or if we would be widening it. */
5291 if (MEM_P (op)
5292 && ! mode_dependent_address_p (XEXP (op, 0))
5293 /* Allow splitting of volatile memory references in case we don't
5294 have an instruction to move the whole thing. */
5295 && (! MEM_VOLATILE_P (op)
5296 || ! have_insn_for (SET, innermode))
5297 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5298 return adjust_address_nv (op, outermode, byte);
5300 /* Handle complex values represented as CONCAT
5301 of real and imaginary part. */
5302 if (GET_CODE (op) == CONCAT)
5304 unsigned int part_size, final_offset;
5305 rtx part, res;
5307 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5308 if (byte < part_size)
5310 part = XEXP (op, 0);
5311 final_offset = byte;
5313 else
5315 part = XEXP (op, 1);
5316 final_offset = byte - part_size;
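/* The requested piece must lie entirely within a single part; otherwise give up. */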
5319 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5320 return NULL_RTX;
5322 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5323 if (res)
5324 return res;
5325 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5326 return gen_rtx_SUBREG (outermode, part, final_offset);
5327 return NULL_RTX;
5330 /* Optimize SUBREG truncations of zero and sign extended values. */
5331 if ((GET_CODE (op) == ZERO_EXTEND
5332 || GET_CODE (op) == SIGN_EXTEND)
5333 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5335 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5337 /* If we're requesting the lowpart of a zero or sign extension,
5338 there are three possibilities. If the outermode is the same
5339 as the origmode, we can omit both the extension and the subreg.
5340 If the outermode is not larger than the origmode, we can apply
5341 the truncation without the extension. Finally, if the outermode
5342 is larger than the origmode, but both are integer modes, we
5343 can just extend to the appropriate mode. */
5344 if (bitpos == 0)
5346 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5347 if (outermode == origmode)
5348 return XEXP (op, 0);
5349 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5350 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5351 subreg_lowpart_offset (outermode,
5352 origmode));
5353 if (SCALAR_INT_MODE_P (outermode))
5354 return simplify_gen_unary (GET_CODE (op), outermode,
5355 XEXP (op, 0), origmode);
5358 /* A SUBREG resulting from a zero extension may fold to zero if
5359 it extracts bits higher than the ZERO_EXTEND's source bits. */
5360 if (GET_CODE (op) == ZERO_EXTEND
5361 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5362 return CONST0_RTX (outermode);
5365 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5366 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5367 the outer subreg is effectively a truncation to the original mode. */
5368 if ((GET_CODE (op) == LSHIFTRT
5369 || GET_CODE (op) == ASHIFTRT)
5370 && SCALAR_INT_MODE_P (outermode)
5371 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5372 to avoid the possibility that an outer LSHIFTRT shifts by more
5373 than the sign extension's sign_bit_copies and introduces zeros
5374 into the high bits of the result. */
5375 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5376 && CONST_INT_P (XEXP (op, 1))
5377 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5378 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5379 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5380 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5381 return simplify_gen_binary (ASHIFTRT, outermode,
5382 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5384 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5385 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5386 the outer subreg is effectively a truncation to the original mode. */
5387 if ((GET_CODE (op) == LSHIFTRT
5388 || GET_CODE (op) == ASHIFTRT)
5389 && SCALAR_INT_MODE_P (outermode)
5390 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5391 && CONST_INT_P (XEXP (op, 1))
5392 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5393 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5394 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5395 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5396 return simplify_gen_binary (LSHIFTRT, outermode,
5397 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5399 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5400 (ashift:QI (x:QI) C), where C is a suitable small constant and
5401 the outer subreg is effectively a truncation to the original mode. */
5402 if (GET_CODE (op) == ASHIFT
5403 && SCALAR_INT_MODE_P (outermode)
5404 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5405 && CONST_INT_P (XEXP (op, 1))
5406 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5407 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5408 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5409 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5410 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5411 return simplify_gen_binary (ASHIFT, outermode,
5412 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5414 /* Recognize a word extraction from a multi-word subreg. */
5415 if ((GET_CODE (op) == LSHIFTRT
5416 || GET_CODE (op) == ASHIFTRT)
5417 && SCALAR_INT_MODE_P (outermode)
5418 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5419 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5420 && CONST_INT_P (XEXP (op, 1))
5421 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5422 && INTVAL (XEXP (op, 1)) >= 0
5423 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5424 && byte == subreg_lowpart_offset (outermode, innermode))
5426 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5427 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5428 (WORDS_BIG_ENDIAN
5429 ? byte - shifted_bytes
5430 : byte + shifted_bytes));
5433 return NULL_RTX;
5436 /* Make a SUBREG operation or equivalent if it folds. */
5439 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5440 enum machine_mode innermode, unsigned int byte)
5442 rtx newx;
5444 newx = simplify_subreg (outermode, op, innermode, byte);
5445 if (newx)
5446 return newx;
5448 if (GET_CODE (op) == SUBREG
5449 || GET_CODE (op) == CONCAT
5450 || GET_MODE (op) == VOIDmode)
5451 return NULL_RTX;
5453 if (validate_subreg (outermode, innermode, op, byte))
5454 return gen_rtx_SUBREG (outermode, op, byte);
5456 return NULL_RTX;
5459 /* Simplify X, an rtx expression.
5461 Return the simplified expression or NULL if no simplifications
5462 were possible.
5464 This is the preferred entry point into the simplification routines;
5465 however, we still allow passes to call the more specific routines.
5467 Right now GCC has three (yes, three) major bodies of RTL simplification
5468 code that need to be unified.
5470 1. fold_rtx in cse.c. This code uses various CSE specific
5471 information to aid in RTL simplification.
5473 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5474 it uses combine specific information to aid in RTL
5475 simplification.
5477 3. The routines in this file.
5480 Long term we want to only have one body of simplification code; to
5481 get to that state I recommend the following steps:
5483 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5484 which do not depend on pass-specific state into these routines.
5486 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5487 use this routine whenever possible.
5489 3. Allow for pass dependent state to be provided to these
5490 routines and add simplifications based on the pass dependent
5491 state. Remove code from cse.c & combine.c that becomes
5492 redundant/dead.
5494 It will take time, but ultimately the compiler will be easier to
5495 maintain and improve. It's totally silly that when we add a
5496 simplification it needs to be added in 4 places (3 for RTL
5497 simplification and 1 for tree simplification). */
5500 simplify_rtx (const_rtx x)
5502 const enum rtx_code code = GET_CODE (x);
5503 const enum machine_mode mode = GET_MODE (x);
5505 switch (GET_RTX_CLASS (code))
5507 case RTX_UNARY:
5508 return simplify_unary_operation (code, mode,
5509 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5510 case RTX_COMM_ARITH:
5511 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5512 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5514 /* Fall through.... */
5516 case RTX_BIN_ARITH:
5517 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5519 case RTX_TERNARY:
5520 case RTX_BITFIELD_OPS:
5521 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5522 XEXP (x, 0), XEXP (x, 1),
5523 XEXP (x, 2));
5525 case RTX_COMPARE:
5526 case RTX_COMM_COMPARE:
5527 return simplify_relational_operation (code, mode,
5528 ((GET_MODE (XEXP (x, 0))
5529 != VOIDmode)
5530 ? GET_MODE (XEXP (x, 0))
5531 : GET_MODE (XEXP (x, 1))),
5532 XEXP (x, 0),
5533 XEXP (x, 1));
5535 case RTX_EXTRA:
5536 if (code == SUBREG)
5537 return simplify_subreg (mode, SUBREG_REG (x),
5538 GET_MODE (SUBREG_REG (x)),
5539 SUBREG_BYTE (x));
5540 break;
5542 case RTX_OBJ:
5543 if (code == LO_SUM)
5545 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5546 if (GET_CODE (XEXP (x, 0)) == HIGH
5547 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5548 return XEXP (x, 1);
5550 break;
5552 default:
5553 break;
5555 return NULL;