gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
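/* Editor's illustrative sketch (not part of the original file; the
   function below is never called and the values are arbitrary):
   HWI_SIGN_EXTEND supplies the high word that corresponds to reading
   LOW as a signed value, so a (low, high) pair then denotes one
   signed double-width integer.  */

static void
hwi_sign_extend_example (void)
{
  HOST_WIDE_INT low = -5;
  gcc_assert (HWI_SIGN_EXTEND (low) == (HOST_WIDE_INT) -1);

  low = 5;
  gcc_assert (HWI_SIGN_EXTEND (low) == (HOST_WIDE_INT) 0);
}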
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
70 return gen_int_mode (- INTVAL (i), mode);
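/* Editor's illustrative sketch (not part of the original file; never
   called, values arbitrary): negating the most negative QImode value
   overflows, so the result is truncated back into the mode.
   -(-128) is 128, which gen_int_mode wraps to -128 again.  */

static void
neg_const_int_example (void)
{
  rtx most_neg = gen_int_mode (-128, QImode);
  rtx negated = neg_const_int (QImode, most_neg);

  gcc_assert (CONST_INT_P (negated) && INTVAL (negated) == -128);
}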
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
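/* Editor's illustrative sketch (not part of the original file; never
   called): in SImode the sign bit is bit 31, so only the constant
   with exactly that bit set satisfies mode_signbit_p.  Assumes a
   HOST_WIDE_INT of at least 32 bits.  */

static void
mode_signbit_p_example (void)
{
  rtx signbit = gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode);

  gcc_assert (mode_signbit_p (SImode, signbit));
  gcc_assert (!mode_signbit_p (SImode, const1_rtx));
}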
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
110 rtx
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
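/* Editor's illustrative sketch (not part of the original file; never
   called, the register number is arbitrary): constant operands fold
   immediately, and for commutative codes a leading constant is
   swapped into the second operand position.  */

static void
simplify_gen_binary_example (void)
{
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  rtx expr = simplify_gen_binary (PLUS, SImode, GEN_INT (7),
				  gen_rtx_REG (SImode, 0));

  gcc_assert (CONST_INT_P (sum) && INTVAL (sum) == 5);
  /* (plus:SI (reg:SI 0) (const_int 7)): the constant ends up second.  */
  gcc_assert (GET_CODE (expr) == PLUS && CONST_INT_P (XEXP (expr, 1)));
}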
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
131 rtx
132 avoid_constant_pool_reference (rtx x)
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
140 case MEM:
141 break;
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 REAL_VALUE_TYPE d;
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 return x;
156 default:
157 return x;
160 if (GET_MODE (x) == BLKmode)
161 return x;
163 addr = XEXP (x, 0);
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
197 else
198 return c;
201 return x;
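/* Editor's illustrative sketch (not part of the original file; never
   called): force a constant into the pool and read it back.  Assumes
   this runs while a function is being compiled and that the target
   allows the constant into its pool; force_const_mem may legitimately
   return NULL_RTX otherwise.  */

static void
avoid_constant_pool_reference_example (void)
{
  rtx cst = gen_int_mode (42, SImode);
  rtx mem = force_const_mem (SImode, cst);

  if (mem != NULL_RTX)
    gcc_assert (rtx_equal_p (avoid_constant_pool_reference (mem), cst));
}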
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
208 rtx
209 delegitimize_mem_from_attrs (rtx x)
211 if (MEM_P (x)
212 && MEM_EXPR (x)
213 && (!MEM_OFFSET (x)
214 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
216 tree decl = MEM_EXPR (x);
217 enum machine_mode mode = GET_MODE (x);
218 HOST_WIDE_INT offset = 0;
220 switch (TREE_CODE (decl))
222 default:
223 decl = NULL;
224 break;
226 case VAR_DECL:
227 break;
229 case ARRAY_REF:
230 case ARRAY_RANGE_REF:
231 case COMPONENT_REF:
232 case BIT_FIELD_REF:
233 case REALPART_EXPR:
234 case IMAGPART_EXPR:
235 case VIEW_CONVERT_EXPR:
237 HOST_WIDE_INT bitsize, bitpos;
238 tree toffset;
239 int unsignedp = 0, volatilep = 0;
241 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
242 &mode, &unsignedp, &volatilep, false);
243 if (bitsize != GET_MODE_BITSIZE (mode)
244 || (bitpos % BITS_PER_UNIT)
245 || (toffset && !host_integerp (toffset, 0)))
246 decl = NULL;
247 else
249 offset += bitpos / BITS_PER_UNIT;
250 if (toffset)
251 offset += TREE_INT_CST_LOW (toffset);
253 break;
257 if (decl
258 && mode == GET_MODE (x)
259 && TREE_CODE (decl) == VAR_DECL
260 && (TREE_STATIC (decl)
261 || DECL_THREAD_LOCAL_P (decl))
262 && DECL_RTL_SET_P (decl)
263 && MEM_P (DECL_RTL (decl)))
265 rtx newx;
267 if (MEM_OFFSET (x))
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
272 if (MEM_P (newx))
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
301 return x;
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
307 rtx
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
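/* Editor's illustrative sketch (not part of the original file; never
   called, register number arbitrary): a constant operand folds, a
   register operand is merely wrapped in the requested code.  */

static void
simplify_gen_unary_example (void)
{
  rtx folded = simplify_gen_unary (NEG, SImode, GEN_INT (8), SImode);
  rtx wrapped = simplify_gen_unary (NOT, SImode,
				    gen_rtx_REG (SImode, 0), SImode);

  gcc_assert (CONST_INT_P (folded) && INTVAL (folded) == -8);
  gcc_assert (GET_CODE (wrapped) == NOT);
}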
320 /* Likewise for ternary operations. */
322 rtx
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
339 rtx
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X, if it returns non-NULL, replace X with its return value and simplify the
355 result. */
357 rtx
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420 case RTX_EXTRA:
421 if (code == SUBREG)
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
431 break;
433 case RTX_OBJ:
434 if (code == MEM)
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
454 break;
456 default:
457 break;
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
474 if (newvec == vec)
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
484 break;
486 case 'e':
487 if (XEXP (x, i))
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
497 break;
499 return newx;
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
505 rtx
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
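/* Editor's illustrative sketch (not part of the original file; never
   called, register number arbitrary): substituting a constant for a
   register re-simplifies the surrounding expression, so replacing R
   with 0 in (plus R (const_int 4)) yields (const_int 4).  */

static void
simplify_replace_rtx_example (void)
{
  rtx r = gen_rtx_REG (SImode, 0);
  rtx expr = gen_rtx_PLUS (SImode, r, GEN_INT (4));
  rtx folded = simplify_replace_rtx (expr, r, const0_rtx);

  gcc_assert (CONST_INT_P (folded) && INTVAL (folded) == 4);
}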
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
514 rtx
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
595 && CONST_INT_P (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult (neg A) B).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
695 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
759 break;
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen. */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
896 break;
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
923 break;
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
933 case POPCOUNT:
934 switch (GET_CODE (op))
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
950 default:
951 break;
953 break;
955 case PARITY:
956 switch (GET_CODE (op))
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
973 default:
974 break;
976 break;
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Check for a sign extension of a subreg of a promoted
1004 variable, where the promotion is sign-extended, and the
1005 target mode is the same as the variable's promotion. */
1006 if (GET_CODE (op) == SUBREG
1007 && SUBREG_PROMOTED_VAR_P (op)
1008 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1009 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1010 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1012 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1013 /* As we do not know which address space the pointer is referring to,
1014 we can do this only if the target does not support different pointer
1015 or address modes depending on the address space. */
1016 if (target_default_pointer_address_modes_p ()
1017 && ! POINTERS_EXTEND_UNSIGNED
1018 && mode == Pmode && GET_MODE (op) == ptr_mode
1019 && (CONSTANT_P (op)
1020 || (GET_CODE (op) == SUBREG
1021 && REG_P (SUBREG_REG (op))
1022 && REG_POINTER (SUBREG_REG (op))
1023 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1024 return convert_memory_address (Pmode, op);
1025 #endif
1026 break;
1028 case ZERO_EXTEND:
1029 /* Check for a zero extension of a subreg of a promoted
1030 variable, where the promotion is zero-extended, and the
1031 target mode is the same as the variable's promotion. */
1032 if (GET_CODE (op) == SUBREG
1033 && SUBREG_PROMOTED_VAR_P (op)
1034 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1035 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1036 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1038 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1039 /* As we do not know which address space the pointer is referring to,
1040 we can do this only if the target does not support different pointer
1041 or address modes depending on the address space. */
1042 if (target_default_pointer_address_modes_p ()
1043 && POINTERS_EXTEND_UNSIGNED > 0
1044 && mode == Pmode && GET_MODE (op) == ptr_mode
1045 && (CONSTANT_P (op)
1046 || (GET_CODE (op) == SUBREG
1047 && REG_P (SUBREG_REG (op))
1048 && REG_POINTER (SUBREG_REG (op))
1049 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1050 return convert_memory_address (Pmode, op);
1051 #endif
1052 break;
1054 default:
1055 break;
1058 return 0;
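/* Editor's illustrative sketch (not part of the original file; never
   called, register number arbitrary): two of the NOT rules above in
   action.  (not (not X)) collapses to X, and (not (plus X -1))
   becomes (neg X).  */

static void
simplify_unary_operation_1_example (void)
{
  rtx r = gen_rtx_REG (SImode, 0);
  rtx not_not = simplify_gen_unary (NOT, SImode,
				    gen_rtx_NOT (SImode, r), SImode);
  rtx negated = simplify_gen_unary (NOT, SImode,
				    gen_rtx_PLUS (SImode, r, constm1_rtx),
				    SImode);

  gcc_assert (not_not == r);
  gcc_assert (GET_CODE (negated) == NEG);
}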
1061 /* Try to compute the value of a unary operation CODE whose output mode is to
1062 be MODE with input operand OP whose mode was originally OP_MODE.
1063 Return zero if the value cannot be computed. */
1064 rtx
1065 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1066 rtx op, enum machine_mode op_mode)
1068 unsigned int width = GET_MODE_BITSIZE (mode);
1070 if (code == VEC_DUPLICATE)
1072 gcc_assert (VECTOR_MODE_P (mode));
1073 if (GET_MODE (op) != VOIDmode)
1075 if (!VECTOR_MODE_P (GET_MODE (op)))
1076 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1077 else
1078 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1079 (GET_MODE (op)));
1081 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1082 || GET_CODE (op) == CONST_VECTOR)
1084 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1085 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1086 rtvec v = rtvec_alloc (n_elts);
1087 unsigned int i;
1089 if (GET_CODE (op) != CONST_VECTOR)
1090 for (i = 0; i < n_elts; i++)
1091 RTVEC_ELT (v, i) = op;
1092 else
1094 enum machine_mode inmode = GET_MODE (op);
1095 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1096 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1098 gcc_assert (in_n_elts < n_elts);
1099 gcc_assert ((n_elts % in_n_elts) == 0);
1100 for (i = 0; i < n_elts; i++)
1101 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1103 return gen_rtx_CONST_VECTOR (mode, v);
1107 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1109 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1110 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1111 enum machine_mode opmode = GET_MODE (op);
1112 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1113 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1114 rtvec v = rtvec_alloc (n_elts);
1115 unsigned int i;
1117 gcc_assert (op_n_elts == n_elts);
1118 for (i = 0; i < n_elts; i++)
1120 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1121 CONST_VECTOR_ELT (op, i),
1122 GET_MODE_INNER (opmode));
1123 if (!x)
1124 return 0;
1125 RTVEC_ELT (v, i) = x;
1127 return gen_rtx_CONST_VECTOR (mode, v);
1130 /* The order of these tests is critical so that, for example, we don't
1131 check the wrong mode (input vs. output) for a conversion operation,
1132 such as FIX. At some point, this should be simplified. */
1134 if (code == FLOAT && GET_MODE (op) == VOIDmode
1135 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1137 HOST_WIDE_INT hv, lv;
1138 REAL_VALUE_TYPE d;
1140 if (CONST_INT_P (op))
1141 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1142 else
1143 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1145 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1146 d = real_value_truncate (mode, d);
1147 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1149 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1150 && (GET_CODE (op) == CONST_DOUBLE
1151 || CONST_INT_P (op)))
1153 HOST_WIDE_INT hv, lv;
1154 REAL_VALUE_TYPE d;
1156 if (CONST_INT_P (op))
1157 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1158 else
1159 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1161 if (op_mode == VOIDmode)
1163 /* We don't know how to interpret negative-looking numbers in
1164 this case, so don't try to fold those. */
1165 if (hv < 0)
1166 return 0;
1168 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1170 else
1171 hv = 0, lv &= GET_MODE_MASK (op_mode);
1173 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1174 d = real_value_truncate (mode, d);
1175 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1178 if (CONST_INT_P (op)
1179 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1181 HOST_WIDE_INT arg0 = INTVAL (op);
1182 HOST_WIDE_INT val;
1184 switch (code)
1186 case NOT:
1187 val = ~ arg0;
1188 break;
1190 case NEG:
1191 val = - arg0;
1192 break;
1194 case ABS:
1195 val = (arg0 >= 0 ? arg0 : - arg0);
1196 break;
1198 case FFS:
1199 /* Don't use ffs here. Instead, get low order bit and then its
1200 number. If arg0 is zero, this will return 0, as desired. */
1201 arg0 &= GET_MODE_MASK (mode);
1202 val = exact_log2 (arg0 & (- arg0)) + 1;
1203 break;
1205 case CLZ:
1206 arg0 &= GET_MODE_MASK (mode);
1207 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1209 else
1210 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1211 break;
1213 case CTZ:
1214 arg0 &= GET_MODE_MASK (mode);
1215 if (arg0 == 0)
1217 /* Even if the value at zero is undefined, we have to come
1218 up with some replacement. Seems good enough. */
1219 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1220 val = GET_MODE_BITSIZE (mode);
1222 else
1223 val = exact_log2 (arg0 & -arg0);
1224 break;
1226 case POPCOUNT:
1227 arg0 &= GET_MODE_MASK (mode);
1228 val = 0;
1229 while (arg0)
1230 val++, arg0 &= arg0 - 1;
1231 break;
1233 case PARITY:
1234 arg0 &= GET_MODE_MASK (mode);
1235 val = 0;
1236 while (arg0)
1237 val++, arg0 &= arg0 - 1;
1238 val &= 1;
1239 break;
1241 case BSWAP:
1243 unsigned int s;
1245 val = 0;
1246 for (s = 0; s < width; s += 8)
1248 unsigned int d = width - s - 8;
1249 unsigned HOST_WIDE_INT byte;
1250 byte = (arg0 >> s) & 0xff;
1251 val |= byte << d;
1254 break;
1256 case TRUNCATE:
1257 val = arg0;
1258 break;
1260 case ZERO_EXTEND:
1261 /* When zero-extending a CONST_INT, we need to know its
1262 original mode. */
1263 gcc_assert (op_mode != VOIDmode);
1264 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1266 /* If we were really extending the mode,
1267 we would have to distinguish between zero-extension
1268 and sign-extension. */
1269 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1270 val = arg0;
1272 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1273 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1274 else
1275 return 0;
1276 break;
1278 case SIGN_EXTEND:
1279 if (op_mode == VOIDmode)
1280 op_mode = mode;
1281 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1283 /* If we were really extending the mode,
1284 we would have to distinguish between zero-extension
1285 and sign-extension. */
1286 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1287 val = arg0;
1289 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1292 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1293 if (val
1294 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1295 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1297 else
1298 return 0;
1299 break;
1301 case SQRT:
1302 case FLOAT_EXTEND:
1303 case FLOAT_TRUNCATE:
1304 case SS_TRUNCATE:
1305 case US_TRUNCATE:
1306 case SS_NEG:
1307 case US_NEG:
1308 case SS_ABS:
1309 return 0;
1311 default:
1312 gcc_unreachable ();
1315 return gen_int_mode (val, mode);
1318 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1319 for a DImode operation on a CONST_INT. */
1320 else if (GET_MODE (op) == VOIDmode
1321 && width <= HOST_BITS_PER_WIDE_INT * 2
1322 && (GET_CODE (op) == CONST_DOUBLE
1323 || CONST_INT_P (op)))
1325 unsigned HOST_WIDE_INT l1, lv;
1326 HOST_WIDE_INT h1, hv;
1328 if (GET_CODE (op) == CONST_DOUBLE)
1329 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1330 else
1331 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1333 switch (code)
1335 case NOT:
1336 lv = ~ l1;
1337 hv = ~ h1;
1338 break;
1340 case NEG:
1341 neg_double (l1, h1, &lv, &hv);
1342 break;
1344 case ABS:
1345 if (h1 < 0)
1346 neg_double (l1, h1, &lv, &hv);
1347 else
1348 lv = l1, hv = h1;
1349 break;
1351 case FFS:
1352 hv = 0;
1353 if (l1 == 0)
1355 if (h1 == 0)
1356 lv = 0;
1357 else
1358 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1360 else
1361 lv = exact_log2 (l1 & -l1) + 1;
1362 break;
1364 case CLZ:
1365 hv = 0;
1366 if (h1 != 0)
1367 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1368 - HOST_BITS_PER_WIDE_INT;
1369 else if (l1 != 0)
1370 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1371 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1372 lv = GET_MODE_BITSIZE (mode);
1373 break;
1375 case CTZ:
1376 hv = 0;
1377 if (l1 != 0)
1378 lv = exact_log2 (l1 & -l1);
1379 else if (h1 != 0)
1380 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1381 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1382 lv = GET_MODE_BITSIZE (mode);
1383 break;
1385 case POPCOUNT:
1386 hv = 0;
1387 lv = 0;
1388 while (l1)
1389 lv++, l1 &= l1 - 1;
1390 while (h1)
1391 lv++, h1 &= h1 - 1;
1392 break;
1394 case PARITY:
1395 hv = 0;
1396 lv = 0;
1397 while (l1)
1398 lv++, l1 &= l1 - 1;
1399 while (h1)
1400 lv++, h1 &= h1 - 1;
1401 lv &= 1;
1402 break;
1404 case BSWAP:
1406 unsigned int s;
1408 hv = 0;
1409 lv = 0;
1410 for (s = 0; s < width; s += 8)
1412 unsigned int d = width - s - 8;
1413 unsigned HOST_WIDE_INT byte;
1415 if (s < HOST_BITS_PER_WIDE_INT)
1416 byte = (l1 >> s) & 0xff;
1417 else
1418 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1420 if (d < HOST_BITS_PER_WIDE_INT)
1421 lv |= byte << d;
1422 else
1423 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1426 break;
1428 case TRUNCATE:
1429 /* This is just a change-of-mode, so do nothing. */
1430 lv = l1, hv = h1;
1431 break;
1433 case ZERO_EXTEND:
1434 gcc_assert (op_mode != VOIDmode);
1436 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1437 return 0;
1439 hv = 0;
1440 lv = l1 & GET_MODE_MASK (op_mode);
1441 break;
1443 case SIGN_EXTEND:
1444 if (op_mode == VOIDmode
1445 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1446 return 0;
1447 else
1449 lv = l1 & GET_MODE_MASK (op_mode);
1450 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1451 && (lv & ((HOST_WIDE_INT) 1
1452 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1453 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1455 hv = HWI_SIGN_EXTEND (lv);
1457 break;
1459 case SQRT:
1460 return 0;
1462 default:
1463 return 0;
1466 return immed_double_const (lv, hv, mode);
1469 else if (GET_CODE (op) == CONST_DOUBLE
1470 && SCALAR_FLOAT_MODE_P (mode))
1472 REAL_VALUE_TYPE d, t;
1473 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1475 switch (code)
1477 case SQRT:
1478 if (HONOR_SNANS (mode) && real_isnan (&d))
1479 return 0;
1480 real_sqrt (&t, mode, &d);
1481 d = t;
1482 break;
1483 case ABS:
1484 d = real_value_abs (&d);
1485 break;
1486 case NEG:
1487 d = real_value_negate (&d);
1488 break;
1489 case FLOAT_TRUNCATE:
1490 d = real_value_truncate (mode, d);
1491 break;
1492 case FLOAT_EXTEND:
1493 /* All this does is change the mode. */
1494 break;
1495 case FIX:
1496 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1497 break;
1498 case NOT:
1500 long tmp[4];
1501 int i;
1503 real_to_target (tmp, &d, GET_MODE (op));
1504 for (i = 0; i < 4; i++)
1505 tmp[i] = ~tmp[i];
1506 real_from_target (&d, tmp, mode);
1507 break;
1509 default:
1510 gcc_unreachable ();
1512 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1515 else if (GET_CODE (op) == CONST_DOUBLE
1516 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1517 && GET_MODE_CLASS (mode) == MODE_INT
1518 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1520 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1521 operators are intentionally left unspecified (to ease implementation
1522 by target backends), for consistency, this routine implements the
1523 same semantics for constant folding as used by the middle-end. */
1525 /* This was formerly used only for non-IEEE float.
1526 eggert@twinsun.com says it is safe for IEEE also. */
1527 HOST_WIDE_INT xh, xl, th, tl;
1528 REAL_VALUE_TYPE x, t;
1529 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1530 switch (code)
1532 case FIX:
1533 if (REAL_VALUE_ISNAN (x))
1534 return const0_rtx;
1536 /* Test against the signed upper bound. */
1537 if (width > HOST_BITS_PER_WIDE_INT)
1539 th = ((unsigned HOST_WIDE_INT) 1
1540 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1541 tl = -1;
1543 else
1545 th = 0;
1546 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1548 real_from_integer (&t, VOIDmode, tl, th, 0);
1549 if (REAL_VALUES_LESS (t, x))
1551 xh = th;
1552 xl = tl;
1553 break;
1556 /* Test against the signed lower bound. */
1557 if (width > HOST_BITS_PER_WIDE_INT)
1559 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1560 tl = 0;
1562 else
1564 th = -1;
1565 tl = (HOST_WIDE_INT) -1 << (width - 1);
1567 real_from_integer (&t, VOIDmode, tl, th, 0);
1568 if (REAL_VALUES_LESS (x, t))
1570 xh = th;
1571 xl = tl;
1572 break;
1574 REAL_VALUE_TO_INT (&xl, &xh, x);
1575 break;
1577 case UNSIGNED_FIX:
1578 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1579 return const0_rtx;
1581 /* Test against the unsigned upper bound. */
1582 if (width == 2*HOST_BITS_PER_WIDE_INT)
1584 th = -1;
1585 tl = -1;
1587 else if (width >= HOST_BITS_PER_WIDE_INT)
1589 th = ((unsigned HOST_WIDE_INT) 1
1590 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1591 tl = -1;
1593 else
1595 th = 0;
1596 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1598 real_from_integer (&t, VOIDmode, tl, th, 1);
1599 if (REAL_VALUES_LESS (t, x))
1601 xh = th;
1602 xl = tl;
1603 break;
1606 REAL_VALUE_TO_INT (&xl, &xh, x);
1607 break;
1609 default:
1610 gcc_unreachable ();
1612 return immed_double_const (xl, xh, mode);
1615 return NULL_RTX;
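/* Editor's illustrative sketch (not part of the original file; never
   called, values arbitrary): folding unary operations on CONST_INTs.
   POPCOUNT of 7 in SImode is 3, and zero-extending a QImode -1 into
   SImode gives 255.  */

static void
simplify_const_unary_operation_example (void)
{
  rtx pop = simplify_const_unary_operation (POPCOUNT, SImode,
					    GEN_INT (7), SImode);
  rtx ext = simplify_const_unary_operation (ZERO_EXTEND, SImode,
					    gen_int_mode (-1, QImode),
					    QImode);

  gcc_assert (pop && INTVAL (pop) == 3);
  gcc_assert (ext && INTVAL (ext) == 255);
}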
1618 /* Subroutine of simplify_binary_operation to simplify a commutative,
1619 associative binary operation CODE with result mode MODE, operating
1620 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1621 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1622 canonicalization is possible. */
1624 static rtx
1625 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1626 rtx op0, rtx op1)
1628 rtx tem;
1630 /* Linearize the operator to the left. */
1631 if (GET_CODE (op1) == code)
1633 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1634 if (GET_CODE (op0) == code)
1636 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1637 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1640 /* "a op (b op c)" becomes "(b op c) op a". */
1641 if (! swap_commutative_operands_p (op1, op0))
1642 return simplify_gen_binary (code, mode, op1, op0);
1644 tem = op0;
1645 op0 = op1;
1646 op1 = tem;
1649 if (GET_CODE (op0) == code)
1651 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1652 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1654 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1655 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1658 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1659 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1660 if (tem != 0)
1661 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1663 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1664 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1665 if (tem != 0)
1666 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1669 return 0;
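/* Editor's illustrative sketch (not part of the original file; never
   called, register number and constants arbitrary): the
   canonicalization above pulls constants outward and combines them,
   so ((R | 0x0f) | 0xf0) becomes (ior:SI (reg:SI 0) (const_int 255)).  */

static void
simplify_associative_operation_example (void)
{
  rtx r = gen_rtx_REG (SImode, 0);
  rtx inner = gen_rtx_IOR (SImode, r, GEN_INT (0x0f));
  rtx tem = simplify_associative_operation (IOR, SImode, inner,
					    GEN_INT (0xf0));

  gcc_assert (tem != 0
	      && GET_CODE (tem) == IOR
	      && CONST_INT_P (XEXP (tem, 1))
	      && INTVAL (XEXP (tem, 1)) == 0xff);
}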
1673 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1674 and OP1. Return 0 if no simplification is possible.
1676 Don't use this for relational operations such as EQ or LT.
1677 Use simplify_relational_operation instead. */
1678 rtx
1679 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1680 rtx op0, rtx op1)
1682 rtx trueop0, trueop1;
1683 rtx tem;
1685 /* Relational operations don't work here. We must know the mode
1686 of the operands in order to do the comparison correctly.
1687 Assuming a full word can give incorrect results.
1688 Consider comparing 128 with -128 in QImode. */
1689 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1690 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1692 /* Make sure the constant is second. */
1693 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1694 && swap_commutative_operands_p (op0, op1))
1696 tem = op0, op0 = op1, op1 = tem;
1699 trueop0 = avoid_constant_pool_reference (op0);
1700 trueop1 = avoid_constant_pool_reference (op1);
1702 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1703 if (tem)
1704 return tem;
1705 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
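/* Editor's illustrative sketch (not part of the original file; never
   called, register number arbitrary): unlike simplify_gen_binary,
   this routine returns NULL_RTX when nothing folds, letting callers
   distinguish "no simplification" from a rebuilt expression.  */

static void
simplify_binary_operation_example (void)
{
  rtx r = gen_rtx_REG (SImode, 0);

  /* R - R folds to zero for integer modes (see the MINUS case below).  */
  gcc_assert (simplify_binary_operation (MINUS, SImode, r, r) == const0_rtx);

  /* R + 1 has no simpler form.  */
  gcc_assert (simplify_binary_operation (PLUS, SImode, r, const1_rtx)
	      == NULL_RTX);
}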
1708 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1709 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1710 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1711 actual constants. */
1713 static rtx
1714 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1715 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1717 rtx tem, reversed, opleft, opright;
1718 HOST_WIDE_INT val;
1719 unsigned int width = GET_MODE_BITSIZE (mode);
1721 /* Even if we can't compute a constant result,
1722 there are some cases worth simplifying. */
1724 switch (code)
1726 case PLUS:
1727 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1728 when x is NaN, infinite, or finite and nonzero. They aren't
1729 when x is -0 and the rounding mode is not towards -infinity,
1730 since (-0) + 0 is then 0. */
1731 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1732 return op0;
1734 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1735 transformations are safe even for IEEE. */
1736 if (GET_CODE (op0) == NEG)
1737 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1738 else if (GET_CODE (op1) == NEG)
1739 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1741 /* (~a) + 1 -> -a */
1742 if (INTEGRAL_MODE_P (mode)
1743 && GET_CODE (op0) == NOT
1744 && trueop1 == const1_rtx)
1745 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1747 /* Handle both-operands-constant cases. We can only add
1748 CONST_INTs to constants since the sum of relocatable symbols
1749 can't be handled by most assemblers. Don't add CONST_INT
1750 to CONST_INT since overflow won't be computed properly if wider
1751 than HOST_BITS_PER_WIDE_INT. */
1753 if ((GET_CODE (op0) == CONST
1754 || GET_CODE (op0) == SYMBOL_REF
1755 || GET_CODE (op0) == LABEL_REF)
1756 && CONST_INT_P (op1))
1757 return plus_constant (op0, INTVAL (op1));
1758 else if ((GET_CODE (op1) == CONST
1759 || GET_CODE (op1) == SYMBOL_REF
1760 || GET_CODE (op1) == LABEL_REF)
1761 && CONST_INT_P (op0))
1762 return plus_constant (op1, INTVAL (op0));
1764 /* See if this is something like X * C - X or vice versa or
1765 if the multiplication is written as a shift. If so, we can
1766 distribute and make a new multiply, shift, or maybe just
1767 have X (if C is 2 in the example above). But don't make
1768 something more expensive than we had before. */
1770 if (SCALAR_INT_MODE_P (mode))
1772 double_int coeff0, coeff1;
1773 rtx lhs = op0, rhs = op1;
1775 coeff0 = double_int_one;
1776 coeff1 = double_int_one;
1778 if (GET_CODE (lhs) == NEG)
1780 coeff0 = double_int_minus_one;
1781 lhs = XEXP (lhs, 0);
1783 else if (GET_CODE (lhs) == MULT
1784 && CONST_INT_P (XEXP (lhs, 1)))
1786 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1787 lhs = XEXP (lhs, 0);
1789 else if (GET_CODE (lhs) == ASHIFT
1790 && CONST_INT_P (XEXP (lhs, 1))
1791 && INTVAL (XEXP (lhs, 1)) >= 0
1792 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1794 coeff0 = double_int_setbit (double_int_zero,
1795 INTVAL (XEXP (lhs, 1)));
1796 lhs = XEXP (lhs, 0);
1799 if (GET_CODE (rhs) == NEG)
1801 coeff1 = double_int_minus_one;
1802 rhs = XEXP (rhs, 0);
1804 else if (GET_CODE (rhs) == MULT
1805 && CONST_INT_P (XEXP (rhs, 1)))
1807 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1808 rhs = XEXP (rhs, 0);
1810 else if (GET_CODE (rhs) == ASHIFT
1811 && CONST_INT_P (XEXP (rhs, 1))
1812 && INTVAL (XEXP (rhs, 1)) >= 0
1813 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1815 coeff1 = double_int_setbit (double_int_zero,
1816 INTVAL (XEXP (rhs, 1)));
1817 rhs = XEXP (rhs, 0);
1820 if (rtx_equal_p (lhs, rhs))
1822 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1823 rtx coeff;
1824 double_int val;
1825 bool speed = optimize_function_for_speed_p (cfun);
1827 val = double_int_add (coeff0, coeff1);
1828 coeff = immed_double_int_const (val, mode);
1830 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1831 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1832 ? tem : 0;
1836 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1837 if ((CONST_INT_P (op1)
1838 || GET_CODE (op1) == CONST_DOUBLE)
1839 && GET_CODE (op0) == XOR
1840 && (CONST_INT_P (XEXP (op0, 1))
1841 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1842 && mode_signbit_p (mode, op1))
1843 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1844 simplify_gen_binary (XOR, mode, op1,
1845 XEXP (op0, 1)));
1847 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1848 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1849 && GET_CODE (op0) == MULT
1850 && GET_CODE (XEXP (op0, 0)) == NEG)
1852 rtx in1, in2;
1854 in1 = XEXP (XEXP (op0, 0), 0);
1855 in2 = XEXP (op0, 1);
1856 return simplify_gen_binary (MINUS, mode, op1,
1857 simplify_gen_binary (MULT, mode,
1858 in1, in2));
1861 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1862 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1863 is 1. */
1864 if (COMPARISON_P (op0)
1865 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1866 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1867 && (reversed = reversed_comparison (op0, mode)))
1868 return
1869 simplify_gen_unary (NEG, mode, reversed, mode);
1871 /* If one of the operands is a PLUS or a MINUS, see if we can
1872 simplify this by the associative law.
1873 Don't use the associative law for floating point.
1874 The inaccuracy makes it nonassociative,
1875 and subtle programs can break if operations are associated. */
1877 if (INTEGRAL_MODE_P (mode)
1878 && (plus_minus_operand_p (op0)
1879 || plus_minus_operand_p (op1))
1880 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1881 return tem;
1883 /* Reassociate floating point addition only when the user
1884 specifies associative math operations. */
1885 if (FLOAT_MODE_P (mode)
1886 && flag_associative_math)
1888 tem = simplify_associative_operation (code, mode, op0, op1);
1889 if (tem)
1890 return tem;
1892 break;
1894 case COMPARE:
1895 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1896 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1897 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1898 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1900 rtx xop00 = XEXP (op0, 0);
1901 rtx xop10 = XEXP (op1, 0);
1903 #ifdef HAVE_cc0
1904 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1905 #else
1906 if (REG_P (xop00) && REG_P (xop10)
1907 && GET_MODE (xop00) == GET_MODE (xop10)
1908 && REGNO (xop00) == REGNO (xop10)
1909 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1910 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1911 #endif
1912 return xop00;
1914 break;
1916 case MINUS:
1917 /* We can't assume x-x is 0 even with non-IEEE floating point,
1918 but since it is zero except in very strange circumstances, we
1919 will treat it as zero with -ffinite-math-only. */
1920 if (rtx_equal_p (trueop0, trueop1)
1921 && ! side_effects_p (op0)
1922 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1923 return CONST0_RTX (mode);
1925 /* Change subtraction from zero into negation. (0 - x) is the
1926 same as -x when x is NaN, infinite, or finite and nonzero.
1927 But if the mode has signed zeros, and does not round towards
1928 -infinity, then 0 - 0 is 0, not -0. */
1929 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1930 return simplify_gen_unary (NEG, mode, op1, mode);
1932 /* (-1 - a) is ~a. */
1933 if (trueop0 == constm1_rtx)
1934 return simplify_gen_unary (NOT, mode, op1, mode);
1936 /* Subtracting 0 has no effect unless the mode has signed zeros
1937 and supports rounding towards -infinity. In such a case,
1938 0 - 0 is -0. */
1939 if (!(HONOR_SIGNED_ZEROS (mode)
1940 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1941 && trueop1 == CONST0_RTX (mode))
1942 return op0;
1944 /* See if this is something like X * C - X or vice versa or
1945 if the multiplication is written as a shift. If so, we can
1946 distribute and make a new multiply, shift, or maybe just
1947 have X (if C is 2 in the example above). But don't make
1948 something more expensive than we had before. */
1950 if (SCALAR_INT_MODE_P (mode))
1952 double_int coeff0, negcoeff1;
1953 rtx lhs = op0, rhs = op1;
1955 coeff0 = double_int_one;
1956 negcoeff1 = double_int_minus_one;
1958 if (GET_CODE (lhs) == NEG)
1960 coeff0 = double_int_minus_one;
1961 lhs = XEXP (lhs, 0);
1963 else if (GET_CODE (lhs) == MULT
1964 && CONST_INT_P (XEXP (lhs, 1)))
1966 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1967 lhs = XEXP (lhs, 0);
1969 else if (GET_CODE (lhs) == ASHIFT
1970 && CONST_INT_P (XEXP (lhs, 1))
1971 && INTVAL (XEXP (lhs, 1)) >= 0
1972 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1974 coeff0 = double_int_setbit (double_int_zero,
1975 INTVAL (XEXP (lhs, 1)));
1976 lhs = XEXP (lhs, 0);
1979 if (GET_CODE (rhs) == NEG)
1981 negcoeff1 = double_int_one;
1982 rhs = XEXP (rhs, 0);
1984 else if (GET_CODE (rhs) == MULT
1985 && CONST_INT_P (XEXP (rhs, 1)))
1987 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
1988 rhs = XEXP (rhs, 0);
1990 else if (GET_CODE (rhs) == ASHIFT
1991 && CONST_INT_P (XEXP (rhs, 1))
1992 && INTVAL (XEXP (rhs, 1)) >= 0
1993 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1995 negcoeff1 = double_int_setbit (double_int_zero,
1996 INTVAL (XEXP (rhs, 1)));
1997 negcoeff1 = double_int_neg (negcoeff1);
1998 rhs = XEXP (rhs, 0);
2001 if (rtx_equal_p (lhs, rhs))
2003 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2004 rtx coeff;
2005 double_int val;
2006 bool speed = optimize_function_for_speed_p (cfun);
2008 val = double_int_add (coeff0, negcoeff1);
2009 coeff = immed_double_int_const (val, mode);
2011 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2012 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2013 ? tem : 0;
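/* For example, (minus (mult X (const_int 3)) X) can become
   (mult X (const_int 2)), provided the new form is no more
   expensive than the original according to rtx_cost.  */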
2017 /* (a - (-b)) -> (a + b). True even for IEEE. */
2018 if (GET_CODE (op1) == NEG)
2019 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2021 /* (-x - c) may be simplified as (-c - x). */
2022 if (GET_CODE (op0) == NEG
2023 && (CONST_INT_P (op1)
2024 || GET_CODE (op1) == CONST_DOUBLE))
2026 tem = simplify_unary_operation (NEG, mode, op1, mode);
2027 if (tem)
2028 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2031 /* Don't let a relocatable value get a negative coeff. */
2032 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2033 return simplify_gen_binary (PLUS, mode,
2034 op0,
2035 neg_const_int (mode, op1));
2037 /* (x - (x & y)) -> (x & ~y) */
2038 if (GET_CODE (op1) == AND)
2040 if (rtx_equal_p (op0, XEXP (op1, 0)))
2042 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2043 GET_MODE (XEXP (op1, 1)));
2044 return simplify_gen_binary (AND, mode, op0, tem);
2046 if (rtx_equal_p (op0, XEXP (op1, 1)))
2048 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2049 GET_MODE (XEXP (op1, 0)));
2050 return simplify_gen_binary (AND, mode, op0, tem);
2054 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2055 by reversing the comparison code if valid. */
2056 if (STORE_FLAG_VALUE == 1
2057 && trueop0 == const1_rtx
2058 && COMPARISON_P (op1)
2059 && (reversed = reversed_comparison (op1, mode)))
2060 return reversed;
2062 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2063 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2064 && GET_CODE (op1) == MULT
2065 && GET_CODE (XEXP (op1, 0)) == NEG)
2067 rtx in1, in2;
2069 in1 = XEXP (XEXP (op1, 0), 0);
2070 in2 = XEXP (op1, 1);
2071 return simplify_gen_binary (PLUS, mode,
2072 simplify_gen_binary (MULT, mode,
2073 in1, in2),
2074 op0);
2077 /* Canonicalize (minus (neg A) (mult B C)) to
2078 (minus (mult (neg B) C) A). */
2079 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2080 && GET_CODE (op1) == MULT
2081 && GET_CODE (op0) == NEG)
2083 rtx in1, in2;
2085 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2086 in2 = XEXP (op1, 1);
2087 return simplify_gen_binary (MINUS, mode,
2088 simplify_gen_binary (MULT, mode,
2089 in1, in2),
2090 XEXP (op0, 0));
2093 /* If one of the operands is a PLUS or a MINUS, see if we can
2094 simplify this by the associative law. This will, for example,
2095 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2096 Don't use the associative law for floating point.
2097 The inaccuracy makes it nonassociative,
2098 and subtle programs can break if operations are associated. */
2100 if (INTEGRAL_MODE_P (mode)
2101 && (plus_minus_operand_p (op0)
2102 || plus_minus_operand_p (op1))
2103 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2104 return tem;
2105 break;
2107 case MULT:
2108 if (trueop1 == constm1_rtx)
2109 return simplify_gen_unary (NEG, mode, op0, mode);
2111 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2112 x is NaN, since x * 0 is then also NaN. Nor is it valid
2113 when the mode has signed zeros, since multiplying a negative
2114 number by 0 will give -0, not 0. */
2115 if (!HONOR_NANS (mode)
2116 && !HONOR_SIGNED_ZEROS (mode)
2117 && trueop1 == CONST0_RTX (mode)
2118 && ! side_effects_p (op0))
2119 return op1;
2121 /* In IEEE floating point, x*1 is not equivalent to x for
2122 signalling NaNs. */
2123 if (!HONOR_SNANS (mode)
2124 && trueop1 == CONST1_RTX (mode))
2125 return op0;
2127 /* Convert multiply by constant power of two into shift unless
2128 we are still generating RTL. This test is a kludge. */
2129 if (CONST_INT_P (trueop1)
2130 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2131 /* If the mode is larger than the host word size, and the
2132 uppermost bit is set, then this isn't a power of two due
2133 to implicit sign extension. */
2134 && (width <= HOST_BITS_PER_WIDE_INT
2135 || val != HOST_BITS_PER_WIDE_INT - 1))
2136 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
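/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */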
2138 /* Likewise for multipliers wider than a word. */
2139 if (GET_CODE (trueop1) == CONST_DOUBLE
2140 && (GET_MODE (trueop1) == VOIDmode
2141 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2142 && GET_MODE (op0) == mode
2143 && CONST_DOUBLE_LOW (trueop1) == 0
2144 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2145 return simplify_gen_binary (ASHIFT, mode, op0,
2146 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2148 /* x*2 is x+x and x*(-1) is -x */
2149 if (GET_CODE (trueop1) == CONST_DOUBLE
2150 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2151 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2152 && GET_MODE (op0) == mode)
2154 REAL_VALUE_TYPE d;
2155 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2157 if (REAL_VALUES_EQUAL (d, dconst2))
2158 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2160 if (!HONOR_SNANS (mode)
2161 && REAL_VALUES_EQUAL (d, dconstm1))
2162 return simplify_gen_unary (NEG, mode, op0, mode);
2165 /* Optimize -x * -x as x * x. */
2166 if (FLOAT_MODE_P (mode)
2167 && GET_CODE (op0) == NEG
2168 && GET_CODE (op1) == NEG
2169 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2170 && !side_effects_p (XEXP (op0, 0)))
2171 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2173 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2174 if (SCALAR_FLOAT_MODE_P (mode)
2175 && GET_CODE (op0) == ABS
2176 && GET_CODE (op1) == ABS
2177 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2178 && !side_effects_p (XEXP (op0, 0)))
2179 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2181 /* Reassociate multiplication, but for floating point MULTs
2182 only when the user specifies unsafe math optimizations. */
2183 if (! FLOAT_MODE_P (mode)
2184 || flag_unsafe_math_optimizations)
2186 tem = simplify_associative_operation (code, mode, op0, op1);
2187 if (tem)
2188 return tem;
2190 break;
2192 case IOR:
2193 if (trueop1 == const0_rtx)
2194 return op0;
2195 if (CONST_INT_P (trueop1)
2196 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2197 == GET_MODE_MASK (mode)))
2198 return op1;
2199 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2200 return op0;
2201 /* A | (~A) -> -1 */
2202 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2203 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2204 && ! side_effects_p (op0)
2205 && SCALAR_INT_MODE_P (mode))
2206 return constm1_rtx;
2208 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2209 if (CONST_INT_P (op1)
2210 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2211 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2212 return op1;
2214 /* Canonicalize (X & C1) | C2. */
2215 if (GET_CODE (op0) == AND
2216 && CONST_INT_P (trueop1)
2217 && CONST_INT_P (XEXP (op0, 1)))
2219 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2220 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2221 HOST_WIDE_INT c2 = INTVAL (trueop1);
2223 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2224 if ((c1 & c2) == c1
2225 && !side_effects_p (XEXP (op0, 0)))
2226 return trueop1;
2228 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2229 if (((c1|c2) & mask) == mask)
2230 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2232 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2233 if (((c1 & ~c2) & mask) != (c1 & mask))
2235 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2236 gen_int_mode (c1 & ~c2, mode));
2237 return simplify_gen_binary (IOR, mode, tem, op1);
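/* For example, in SImode (ior (and X (const_int 0xff)) (const_int 0x0f))
   becomes (ior (and X (const_int 0xf0)) (const_int 0x0f)).  */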
2241 /* Convert (A & B) | A to A. */
2242 if (GET_CODE (op0) == AND
2243 && (rtx_equal_p (XEXP (op0, 0), op1)
2244 || rtx_equal_p (XEXP (op0, 1), op1))
2245 && ! side_effects_p (XEXP (op0, 0))
2246 && ! side_effects_p (XEXP (op0, 1)))
2247 return op1;
2249 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2250 mode size to (rotate A CX). */
2252 if (GET_CODE (op1) == ASHIFT
2253 || GET_CODE (op1) == SUBREG)
2255 opleft = op1;
2256 opright = op0;
2258 else
2260 opright = op1;
2261 opleft = op0;
2264 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2265 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2266 && CONST_INT_P (XEXP (opleft, 1))
2267 && CONST_INT_P (XEXP (opright, 1))
2268 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2269 == GET_MODE_BITSIZE (mode)))
2270 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
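/* For example, in SImode (ior (ashift A (const_int 3))
   (lshiftrt A (const_int 29))) becomes (rotate A (const_int 3)).  */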
2272 /* Same, but for ashift that has been "simplified" to a wider mode
2273 by simplify_shift_const. */
2275 if (GET_CODE (opleft) == SUBREG
2276 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2277 && GET_CODE (opright) == LSHIFTRT
2278 && GET_CODE (XEXP (opright, 0)) == SUBREG
2279 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2280 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2281 && (GET_MODE_SIZE (GET_MODE (opleft))
2282 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2283 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2284 SUBREG_REG (XEXP (opright, 0)))
2285 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2286 && CONST_INT_P (XEXP (opright, 1))
2287 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2288 == GET_MODE_BITSIZE (mode)))
2289 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2290 XEXP (SUBREG_REG (opleft), 1));
2292 /* If we have (ior (and X C1) C2), simplify this by making
2293 C1 as small as possible if C1 actually changes.  */

2294 if (CONST_INT_P (op1)
2295 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2296 || INTVAL (op1) > 0)
2297 && GET_CODE (op0) == AND
2298 && CONST_INT_P (XEXP (op0, 1))
2299 && CONST_INT_P (op1)
2300 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2301 return simplify_gen_binary (IOR, mode,
2302 simplify_gen_binary
2303 (AND, mode, XEXP (op0, 0),
2304 GEN_INT (INTVAL (XEXP (op0, 1))
2305 & ~INTVAL (op1))),
2306 op1);
2308 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2309 a (sign_extend (plus ...)).  If OP1 is a CONST_INT and the
2310 PLUS does not affect any of the bits in OP1, we can do the
2311 IOR as a PLUS and we can associate.  This is valid if OP1
2312 can be safely shifted left C bits.  */
2313 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2314 && GET_CODE (XEXP (op0, 0)) == PLUS
2315 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2316 && CONST_INT_P (XEXP (op0, 1))
2317 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2319 int count = INTVAL (XEXP (op0, 1));
2320 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2322 if (mask >> count == INTVAL (trueop1)
2323 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2324 return simplify_gen_binary (ASHIFTRT, mode,
2325 plus_constant (XEXP (op0, 0), mask),
2326 XEXP (op0, 1));
2329 tem = simplify_associative_operation (code, mode, op0, op1);
2330 if (tem)
2331 return tem;
2332 break;
2334 case XOR:
2335 if (trueop1 == const0_rtx)
2336 return op0;
2337 if (CONST_INT_P (trueop1)
2338 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2339 == GET_MODE_MASK (mode)))
2340 return simplify_gen_unary (NOT, mode, op0, mode);
2341 if (rtx_equal_p (trueop0, trueop1)
2342 && ! side_effects_p (op0)
2343 && GET_MODE_CLASS (mode) != MODE_CC)
2344 return CONST0_RTX (mode);
2346 /* Canonicalize XOR of the most significant bit to PLUS. */
2347 if ((CONST_INT_P (op1)
2348 || GET_CODE (op1) == CONST_DOUBLE)
2349 && mode_signbit_p (mode, op1))
2350 return simplify_gen_binary (PLUS, mode, op0, op1);
2351 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2352 if ((CONST_INT_P (op1)
2353 || GET_CODE (op1) == CONST_DOUBLE)
2354 && GET_CODE (op0) == PLUS
2355 && (CONST_INT_P (XEXP (op0, 1))
2356 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2357 && mode_signbit_p (mode, XEXP (op0, 1)))
2358 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2359 simplify_gen_binary (XOR, mode, op1,
2360 XEXP (op0, 1)));
2362 /* If we are XORing two things that have no bits in common,
2363 convert them into an IOR.  This helps to detect rotations encoded
2364 as shift pairs (see the IOR case above) and possibly other simplifications.  */
2366 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2367 && (nonzero_bits (op0, mode)
2368 & nonzero_bits (op1, mode)) == 0)
2369 return (simplify_gen_binary (IOR, mode, op0, op1));
2371 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2372 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2373 (NOT y). */
2375 int num_negated = 0;
2377 if (GET_CODE (op0) == NOT)
2378 num_negated++, op0 = XEXP (op0, 0);
2379 if (GET_CODE (op1) == NOT)
2380 num_negated++, op1 = XEXP (op1, 0);
2382 if (num_negated == 2)
2383 return simplify_gen_binary (XOR, mode, op0, op1);
2384 else if (num_negated == 1)
2385 return simplify_gen_unary (NOT, mode,
2386 simplify_gen_binary (XOR, mode, op0, op1),
2387 mode);
2390 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2391 correspond to a machine insn or result in further simplifications
2392 if B is a constant. */
2394 if (GET_CODE (op0) == AND
2395 && rtx_equal_p (XEXP (op0, 1), op1)
2396 && ! side_effects_p (op1))
2397 return simplify_gen_binary (AND, mode,
2398 simplify_gen_unary (NOT, mode,
2399 XEXP (op0, 0), mode),
2400 op1);
2402 else if (GET_CODE (op0) == AND
2403 && rtx_equal_p (XEXP (op0, 0), op1)
2404 && ! side_effects_p (op1))
2405 return simplify_gen_binary (AND, mode,
2406 simplify_gen_unary (NOT, mode,
2407 XEXP (op0, 1), mode),
2408 op1);
2410 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2411 comparison if STORE_FLAG_VALUE is 1. */
2412 if (STORE_FLAG_VALUE == 1
2413 && trueop1 == const1_rtx
2414 && COMPARISON_P (op0)
2415 && (reversed = reversed_comparison (op0, mode)))
2416 return reversed;
2418 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2419 is (lt foo (const_int 0)), so we can perform the above
2420 simplification if STORE_FLAG_VALUE is 1. */
2422 if (STORE_FLAG_VALUE == 1
2423 && trueop1 == const1_rtx
2424 && GET_CODE (op0) == LSHIFTRT
2425 && CONST_INT_P (XEXP (op0, 1))
2426 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2427 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
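/* For example, in SImode (xor (lshiftrt X (const_int 31)) (const_int 1))
   becomes (ge X (const_int 0)) when STORE_FLAG_VALUE is 1.  */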
2429 /* (xor (comparison foo bar) (const_int sign-bit))
2430 when STORE_FLAG_VALUE is the sign bit. */
2431 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2432 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2433 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2434 && trueop1 == const_true_rtx
2435 && COMPARISON_P (op0)
2436 && (reversed = reversed_comparison (op0, mode)))
2437 return reversed;
2439 tem = simplify_associative_operation (code, mode, op0, op1);
2440 if (tem)
2441 return tem;
2442 break;
2444 case AND:
2445 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2446 return trueop1;
2447 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2449 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2450 HOST_WIDE_INT nzop1;
2451 if (CONST_INT_P (trueop1))
2453 HOST_WIDE_INT val1 = INTVAL (trueop1);
2454 /* If we are turning off bits already known off in OP0, we need
2455 not do an AND. */
2456 if ((nzop0 & ~val1) == 0)
2457 return op0;
2459 nzop1 = nonzero_bits (trueop1, mode);
2460 /* If we are clearing all the nonzero bits, the result is zero. */
2461 if ((nzop1 & nzop0) == 0
2462 && !side_effects_p (op0) && !side_effects_p (op1))
2463 return CONST0_RTX (mode);
2465 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2466 && GET_MODE_CLASS (mode) != MODE_CC)
2467 return op0;
2468 /* A & (~A) -> 0 */
2469 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2470 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2471 && ! side_effects_p (op0)
2472 && GET_MODE_CLASS (mode) != MODE_CC)
2473 return CONST0_RTX (mode);
2475 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2476 there are no nonzero bits of C outside of X's mode. */
2477 if ((GET_CODE (op0) == SIGN_EXTEND
2478 || GET_CODE (op0) == ZERO_EXTEND)
2479 && CONST_INT_P (trueop1)
2480 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2481 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2482 & INTVAL (trueop1)) == 0)
2484 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2485 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2486 gen_int_mode (INTVAL (trueop1),
2487 imode));
2488 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2491 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2492 we might be able to further simplify the AND with X and potentially
2493 remove the truncation altogether. */
2494 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2496 rtx x = XEXP (op0, 0);
2497 enum machine_mode xmode = GET_MODE (x);
2498 tem = simplify_gen_binary (AND, xmode, x,
2499 gen_int_mode (INTVAL (trueop1), xmode));
2500 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2503 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2504 if (GET_CODE (op0) == IOR
2505 && CONST_INT_P (trueop1)
2506 && CONST_INT_P (XEXP (op0, 1)))
2508 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2509 return simplify_gen_binary (IOR, mode,
2510 simplify_gen_binary (AND, mode,
2511 XEXP (op0, 0), op1),
2512 gen_int_mode (tmp, mode));
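/* For example, (and (ior X (const_int 0x0c)) (const_int 0x0a)) becomes
   (ior (and X (const_int 0x0a)) (const_int 0x08)).  */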
2515 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2516 insn (and may simplify more). */
2517 if (GET_CODE (op0) == XOR
2518 && rtx_equal_p (XEXP (op0, 0), op1)
2519 && ! side_effects_p (op1))
2520 return simplify_gen_binary (AND, mode,
2521 simplify_gen_unary (NOT, mode,
2522 XEXP (op0, 1), mode),
2523 op1);
2525 if (GET_CODE (op0) == XOR
2526 && rtx_equal_p (XEXP (op0, 1), op1)
2527 && ! side_effects_p (op1))
2528 return simplify_gen_binary (AND, mode,
2529 simplify_gen_unary (NOT, mode,
2530 XEXP (op0, 0), mode),
2531 op1);
2533 /* Similarly for (~(A ^ B)) & A. */
2534 if (GET_CODE (op0) == NOT
2535 && GET_CODE (XEXP (op0, 0)) == XOR
2536 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2537 && ! side_effects_p (op1))
2538 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2540 if (GET_CODE (op0) == NOT
2541 && GET_CODE (XEXP (op0, 0)) == XOR
2542 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2543 && ! side_effects_p (op1))
2544 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2546 /* Convert (A | B) & A to A. */
2547 if (GET_CODE (op0) == IOR
2548 && (rtx_equal_p (XEXP (op0, 0), op1)
2549 || rtx_equal_p (XEXP (op0, 1), op1))
2550 && ! side_effects_p (XEXP (op0, 0))
2551 && ! side_effects_p (XEXP (op0, 1)))
2552 return op1;
2554 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2555 ((A & N) + B) & M -> (A + B) & M
2556 Similarly if (N & M) == 0,
2557 ((A | N) + B) & M -> (A + B) & M
2558 and for - instead of + and/or ^ instead of |.
2559 Also, if (N & M) == 0, then
2560 (A +- N) & M -> A & M. */
2561 if (CONST_INT_P (trueop1)
2562 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2563 && ~INTVAL (trueop1)
2564 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2565 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2567 rtx pmop[2];
2568 int which;
2570 pmop[0] = XEXP (op0, 0);
2571 pmop[1] = XEXP (op0, 1);
2573 if (CONST_INT_P (pmop[1])
2574 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2575 return simplify_gen_binary (AND, mode, pmop[0], op1);
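/* For example, (and (plus A (const_int 0x100)) (const_int 0xff)) becomes
   (and A (const_int 0xff)), since adding 0x100 cannot change the low
   eight bits.  */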
2577 for (which = 0; which < 2; which++)
2579 tem = pmop[which];
2580 switch (GET_CODE (tem))
2582 case AND:
2583 if (CONST_INT_P (XEXP (tem, 1))
2584 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2585 == INTVAL (trueop1))
2586 pmop[which] = XEXP (tem, 0);
2587 break;
2588 case IOR:
2589 case XOR:
2590 if (CONST_INT_P (XEXP (tem, 1))
2591 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2592 pmop[which] = XEXP (tem, 0);
2593 break;
2594 default:
2595 break;
2599 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2601 tem = simplify_gen_binary (GET_CODE (op0), mode,
2602 pmop[0], pmop[1]);
2603 return simplify_gen_binary (code, mode, tem, op1);
2607 /* (and X (ior (not X) Y)) -> (and X Y) */
2608 if (GET_CODE (op1) == IOR
2609 && GET_CODE (XEXP (op1, 0)) == NOT
2610 && op0 == XEXP (XEXP (op1, 0), 0))
2611 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2613 /* (and (ior (not X) Y) X) -> (and X Y) */
2614 if (GET_CODE (op0) == IOR
2615 && GET_CODE (XEXP (op0, 0)) == NOT
2616 && op1 == XEXP (XEXP (op0, 0), 0))
2617 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2619 tem = simplify_associative_operation (code, mode, op0, op1);
2620 if (tem)
2621 return tem;
2622 break;
2624 case UDIV:
2625 /* 0/x is 0 (or x&0 if x has side-effects). */
2626 if (trueop0 == CONST0_RTX (mode))
2628 if (side_effects_p (op1))
2629 return simplify_gen_binary (AND, mode, op1, trueop0);
2630 return trueop0;
2632 /* x/1 is x. */
2633 if (trueop1 == CONST1_RTX (mode))
2634 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2635 /* Convert divide by power of two into shift. */
2636 if (CONST_INT_P (trueop1)
2637 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2638 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
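/* For example, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */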
2639 break;
2641 case DIV:
2642 /* Handle floating point and integers separately. */
2643 if (SCALAR_FLOAT_MODE_P (mode))
2645 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2646 safe for modes with NaNs, since 0.0 / 0.0 will then be
2647 NaN rather than 0.0. Nor is it safe for modes with signed
2648 zeros, since dividing 0 by a negative number gives -0.0.  */
2649 if (trueop0 == CONST0_RTX (mode)
2650 && !HONOR_NANS (mode)
2651 && !HONOR_SIGNED_ZEROS (mode)
2652 && ! side_effects_p (op1))
2653 return op0;
2654 /* x/1.0 is x. */
2655 if (trueop1 == CONST1_RTX (mode)
2656 && !HONOR_SNANS (mode))
2657 return op0;
2659 if (GET_CODE (trueop1) == CONST_DOUBLE
2660 && trueop1 != CONST0_RTX (mode))
2662 REAL_VALUE_TYPE d;
2663 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2665 /* x/-1.0 is -x. */
2666 if (REAL_VALUES_EQUAL (d, dconstm1)
2667 && !HONOR_SNANS (mode))
2668 return simplify_gen_unary (NEG, mode, op0, mode);
2670 /* Change FP division by a constant into multiplication.
2671 Only do this with -freciprocal-math. */
2672 if (flag_reciprocal_math
2673 && !REAL_VALUES_EQUAL (d, dconst0))
2675 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2676 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2677 return simplify_gen_binary (MULT, mode, op0, tem);
2681 else
2683 /* 0/x is 0 (or x&0 if x has side-effects). */
2684 if (trueop0 == CONST0_RTX (mode))
2686 if (side_effects_p (op1))
2687 return simplify_gen_binary (AND, mode, op1, trueop0);
2688 return trueop0;
2690 /* x/1 is x. */
2691 if (trueop1 == CONST1_RTX (mode))
2692 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2693 /* x/-1 is -x. */
2694 if (trueop1 == constm1_rtx)
2696 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2697 return simplify_gen_unary (NEG, mode, x, mode);
2700 break;
2702 case UMOD:
2703 /* 0%x is 0 (or x&0 if x has side-effects). */
2704 if (trueop0 == CONST0_RTX (mode))
2706 if (side_effects_p (op1))
2707 return simplify_gen_binary (AND, mode, op1, trueop0);
2708 return trueop0;
2710 /* x%1 is 0 (or x&0 if x has side-effects). */
2711 if (trueop1 == CONST1_RTX (mode))
2713 if (side_effects_p (op0))
2714 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2715 return CONST0_RTX (mode);
2717 /* Implement modulus by power of two as AND. */
2718 if (CONST_INT_P (trueop1)
2719 && exact_log2 (INTVAL (trueop1)) > 0)
2720 return simplify_gen_binary (AND, mode, op0,
2721 GEN_INT (INTVAL (op1) - 1));
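/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */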
2722 break;
2724 case MOD:
2725 /* 0%x is 0 (or x&0 if x has side-effects). */
2726 if (trueop0 == CONST0_RTX (mode))
2728 if (side_effects_p (op1))
2729 return simplify_gen_binary (AND, mode, op1, trueop0);
2730 return trueop0;
2732 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2733 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2735 if (side_effects_p (op0))
2736 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2737 return CONST0_RTX (mode);
2739 break;
2741 case ROTATERT:
2742 case ROTATE:
2743 case ASHIFTRT:
2744 if (trueop1 == CONST0_RTX (mode))
2745 return op0;
2746 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2747 return op0;
2748 /* Rotating ~0 always results in ~0. */
2749 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2750 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2751 && ! side_effects_p (op1))
2752 return op0;
2753 canonicalize_shift:
2754 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2756 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2757 if (val != INTVAL (op1))
2758 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
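/* For example, with SHIFT_COUNT_TRUNCATED an SImode shift by
   (const_int 33) is rewritten as a shift by (const_int 1).  */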
2760 break;
2762 case ASHIFT:
2763 case SS_ASHIFT:
2764 case US_ASHIFT:
2765 if (trueop1 == CONST0_RTX (mode))
2766 return op0;
2767 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2768 return op0;
2769 goto canonicalize_shift;
2771 case LSHIFTRT:
2772 if (trueop1 == CONST0_RTX (mode))
2773 return op0;
2774 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2775 return op0;
2776 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2777 if (GET_CODE (op0) == CLZ
2778 && CONST_INT_P (trueop1)
2779 && STORE_FLAG_VALUE == 1
2780 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2782 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2783 unsigned HOST_WIDE_INT zero_val = 0;
2785 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2786 && zero_val == GET_MODE_BITSIZE (imode)
2787 && INTVAL (trueop1) == exact_log2 (zero_val))
2788 return simplify_gen_relational (EQ, mode, imode,
2789 XEXP (op0, 0), const0_rtx);
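/* For example, on a target where CLZ of zero is defined to be the mode
   bitsize, (lshiftrt (clz:SI X) (const_int 5)) becomes (eq X (const_int 0)),
   since only a zero input makes the CLZ result reach 32.  */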
2791 goto canonicalize_shift;
2793 case SMIN:
2794 if (width <= HOST_BITS_PER_WIDE_INT
2795 && CONST_INT_P (trueop1)
2796 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2797 && ! side_effects_p (op0))
2798 return op1;
2799 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2800 return op0;
2801 tem = simplify_associative_operation (code, mode, op0, op1);
2802 if (tem)
2803 return tem;
2804 break;
2806 case SMAX:
2807 if (width <= HOST_BITS_PER_WIDE_INT
2808 && CONST_INT_P (trueop1)
2809 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2810 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2811 && ! side_effects_p (op0))
2812 return op1;
2813 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2814 return op0;
2815 tem = simplify_associative_operation (code, mode, op0, op1);
2816 if (tem)
2817 return tem;
2818 break;
2820 case UMIN:
2821 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2822 return op1;
2823 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2824 return op0;
2825 tem = simplify_associative_operation (code, mode, op0, op1);
2826 if (tem)
2827 return tem;
2828 break;
2830 case UMAX:
2831 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2832 return op1;
2833 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2834 return op0;
2835 tem = simplify_associative_operation (code, mode, op0, op1);
2836 if (tem)
2837 return tem;
2838 break;
2840 case SS_PLUS:
2841 case US_PLUS:
2842 case SS_MINUS:
2843 case US_MINUS:
2844 case SS_MULT:
2845 case US_MULT:
2846 case SS_DIV:
2847 case US_DIV:
2848 /* ??? There are simplifications that can be done. */
2849 return 0;
2851 case VEC_SELECT:
2852 if (!VECTOR_MODE_P (mode))
2854 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2855 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2856 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2857 gcc_assert (XVECLEN (trueop1, 0) == 1);
2858 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2860 if (GET_CODE (trueop0) == CONST_VECTOR)
2861 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2862 (trueop1, 0, 0)));
2864 /* Extract a scalar element from a nested VEC_SELECT expression
2865 (with an optional nested VEC_CONCAT expression).  Some targets
2866 (i386) extract a scalar element from a vector using a chain of
2867 nested VEC_SELECT expressions.  When the input operand is a memory
2868 operand, this operation can be simplified to a simple scalar
2869 load from an offset memory address. */
2870 if (GET_CODE (trueop0) == VEC_SELECT)
2872 rtx op0 = XEXP (trueop0, 0);
2873 rtx op1 = XEXP (trueop0, 1);
2875 enum machine_mode opmode = GET_MODE (op0);
2876 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2877 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2879 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2880 int elem;
2882 rtvec vec;
2883 rtx tmp_op, tmp;
2885 gcc_assert (GET_CODE (op1) == PARALLEL);
2886 gcc_assert (i < n_elts);
2889 /* Select the element pointed to by the nested selector. */
2889 elem = INTVAL (XVECEXP (op1, 0, i));
2891 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2892 if (GET_CODE (op0) == VEC_CONCAT)
2894 rtx op00 = XEXP (op0, 0);
2895 rtx op01 = XEXP (op0, 1);
2897 enum machine_mode mode00, mode01;
2898 int n_elts00, n_elts01;
2900 mode00 = GET_MODE (op00);
2901 mode01 = GET_MODE (op01);
2903 /* Find out the number of elements of each operand. */
2904 if (VECTOR_MODE_P (mode00))
2906 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2907 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2909 else
2910 n_elts00 = 1;
2912 if (VECTOR_MODE_P (mode01))
2914 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2915 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2917 else
2918 n_elts01 = 1;
2920 gcc_assert (n_elts == n_elts00 + n_elts01);
2922 /* Select the correct operand of the VEC_CONCAT
2923 and adjust the selector. */
2924 if (elem < n_elts01)
2925 tmp_op = op00;
2926 else
2928 tmp_op = op01;
2929 elem -= n_elts00;
2932 else
2933 tmp_op = op0;
2935 vec = rtvec_alloc (1);
2936 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2938 tmp = gen_rtx_fmt_ee (code, mode,
2939 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2940 return tmp;
2942 if (GET_CODE (trueop0) == VEC_DUPLICATE
2943 && GET_MODE (XEXP (trueop0, 0)) == mode)
2944 return XEXP (trueop0, 0);
2946 else
2948 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2949 gcc_assert (GET_MODE_INNER (mode)
2950 == GET_MODE_INNER (GET_MODE (trueop0)));
2951 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2953 if (GET_CODE (trueop0) == CONST_VECTOR)
2955 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2956 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2957 rtvec v = rtvec_alloc (n_elts);
2958 unsigned int i;
2960 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2961 for (i = 0; i < n_elts; i++)
2963 rtx x = XVECEXP (trueop1, 0, i);
2965 gcc_assert (CONST_INT_P (x));
2966 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2967 INTVAL (x));
2970 return gen_rtx_CONST_VECTOR (mode, v);
2974 if (XVECLEN (trueop1, 0) == 1
2975 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2976 && GET_CODE (trueop0) == VEC_CONCAT)
2978 rtx vec = trueop0;
2979 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2981 /* Try to find the element in the VEC_CONCAT. */
2982 while (GET_MODE (vec) != mode
2983 && GET_CODE (vec) == VEC_CONCAT)
2985 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2986 if (offset < vec_size)
2987 vec = XEXP (vec, 0);
2988 else
2990 offset -= vec_size;
2991 vec = XEXP (vec, 1);
2993 vec = avoid_constant_pool_reference (vec);
2996 if (GET_MODE (vec) == mode)
2997 return vec;
3000 return 0;
3001 case VEC_CONCAT:
3003 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3004 ? GET_MODE (trueop0)
3005 : GET_MODE_INNER (mode));
3006 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3007 ? GET_MODE (trueop1)
3008 : GET_MODE_INNER (mode));
3010 gcc_assert (VECTOR_MODE_P (mode));
3011 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3012 == GET_MODE_SIZE (mode));
3014 if (VECTOR_MODE_P (op0_mode))
3015 gcc_assert (GET_MODE_INNER (mode)
3016 == GET_MODE_INNER (op0_mode));
3017 else
3018 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3020 if (VECTOR_MODE_P (op1_mode))
3021 gcc_assert (GET_MODE_INNER (mode)
3022 == GET_MODE_INNER (op1_mode));
3023 else
3024 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3026 if ((GET_CODE (trueop0) == CONST_VECTOR
3027 || CONST_INT_P (trueop0)
3028 || GET_CODE (trueop0) == CONST_DOUBLE)
3029 && (GET_CODE (trueop1) == CONST_VECTOR
3030 || CONST_INT_P (trueop1)
3031 || GET_CODE (trueop1) == CONST_DOUBLE))
3033 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3034 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3035 rtvec v = rtvec_alloc (n_elts);
3036 unsigned int i;
3037 unsigned in_n_elts = 1;
3039 if (VECTOR_MODE_P (op0_mode))
3040 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3041 for (i = 0; i < n_elts; i++)
3043 if (i < in_n_elts)
3045 if (!VECTOR_MODE_P (op0_mode))
3046 RTVEC_ELT (v, i) = trueop0;
3047 else
3048 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3050 else
3052 if (!VECTOR_MODE_P (op1_mode))
3053 RTVEC_ELT (v, i) = trueop1;
3054 else
3055 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3056 i - in_n_elts);
3060 return gen_rtx_CONST_VECTOR (mode, v);
3063 return 0;
3065 default:
3066 gcc_unreachable ();
3069 return 0;
3073 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3074 rtx op0, rtx op1)
3076 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3077 HOST_WIDE_INT val;
3078 unsigned int width = GET_MODE_BITSIZE (mode);
3080 if (VECTOR_MODE_P (mode)
3081 && code != VEC_CONCAT
3082 && GET_CODE (op0) == CONST_VECTOR
3083 && GET_CODE (op1) == CONST_VECTOR)
3085 unsigned n_elts = GET_MODE_NUNITS (mode);
3086 enum machine_mode op0mode = GET_MODE (op0);
3087 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3088 enum machine_mode op1mode = GET_MODE (op1);
3089 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3090 rtvec v = rtvec_alloc (n_elts);
3091 unsigned int i;
3093 gcc_assert (op0_n_elts == n_elts);
3094 gcc_assert (op1_n_elts == n_elts);
3095 for (i = 0; i < n_elts; i++)
3097 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3098 CONST_VECTOR_ELT (op0, i),
3099 CONST_VECTOR_ELT (op1, i));
3100 if (!x)
3101 return 0;
3102 RTVEC_ELT (v, i) = x;
3105 return gen_rtx_CONST_VECTOR (mode, v);
3108 if (VECTOR_MODE_P (mode)
3109 && code == VEC_CONCAT
3110 && (CONST_INT_P (op0)
3111 || GET_CODE (op0) == CONST_DOUBLE
3112 || GET_CODE (op0) == CONST_FIXED)
3113 && (CONST_INT_P (op1)
3114 || GET_CODE (op1) == CONST_DOUBLE
3115 || GET_CODE (op1) == CONST_FIXED))
3117 unsigned n_elts = GET_MODE_NUNITS (mode);
3118 rtvec v = rtvec_alloc (n_elts);
3120 gcc_assert (n_elts >= 2);
3121 if (n_elts == 2)
3123 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3124 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3126 RTVEC_ELT (v, 0) = op0;
3127 RTVEC_ELT (v, 1) = op1;
3129 else
3131 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3132 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3133 unsigned i;
3135 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3136 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3137 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3139 for (i = 0; i < op0_n_elts; ++i)
3140 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3141 for (i = 0; i < op1_n_elts; ++i)
3142 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3145 return gen_rtx_CONST_VECTOR (mode, v);
3148 if (SCALAR_FLOAT_MODE_P (mode)
3149 && GET_CODE (op0) == CONST_DOUBLE
3150 && GET_CODE (op1) == CONST_DOUBLE
3151 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3153 if (code == AND
3154 || code == IOR
3155 || code == XOR)
3157 long tmp0[4];
3158 long tmp1[4];
3159 REAL_VALUE_TYPE r;
3160 int i;
3162 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3163 GET_MODE (op0));
3164 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3165 GET_MODE (op1));
3166 for (i = 0; i < 4; i++)
3168 switch (code)
3170 case AND:
3171 tmp0[i] &= tmp1[i];
3172 break;
3173 case IOR:
3174 tmp0[i] |= tmp1[i];
3175 break;
3176 case XOR:
3177 tmp0[i] ^= tmp1[i];
3178 break;
3179 default:
3180 gcc_unreachable ();
3183 real_from_target (&r, tmp0, mode);
3184 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3186 else
3188 REAL_VALUE_TYPE f0, f1, value, result;
3189 bool inexact;
3191 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3192 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3193 real_convert (&f0, mode, &f0);
3194 real_convert (&f1, mode, &f1);
3196 if (HONOR_SNANS (mode)
3197 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3198 return 0;
3200 if (code == DIV
3201 && REAL_VALUES_EQUAL (f1, dconst0)
3202 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3203 return 0;
3205 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3206 && flag_trapping_math
3207 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3209 int s0 = REAL_VALUE_NEGATIVE (f0);
3210 int s1 = REAL_VALUE_NEGATIVE (f1);
3212 switch (code)
3214 case PLUS:
3215 /* Inf + -Inf = NaN plus exception. */
3216 if (s0 != s1)
3217 return 0;
3218 break;
3219 case MINUS:
3220 /* Inf - Inf = NaN plus exception. */
3221 if (s0 == s1)
3222 return 0;
3223 break;
3224 case DIV:
3225 /* Inf / Inf = NaN plus exception. */
3226 return 0;
3227 default:
3228 break;
3232 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3233 && flag_trapping_math
3234 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3235 || (REAL_VALUE_ISINF (f1)
3236 && REAL_VALUES_EQUAL (f0, dconst0))))
3237 /* Inf * 0 = NaN plus exception. */
3238 return 0;
3240 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3241 &f0, &f1);
3242 real_convert (&result, mode, &value);
3244 /* Don't constant fold this floating point operation if
3245 the result has overflowed and flag_trapping_math is set. */
3247 if (flag_trapping_math
3248 && MODE_HAS_INFINITIES (mode)
3249 && REAL_VALUE_ISINF (result)
3250 && !REAL_VALUE_ISINF (f0)
3251 && !REAL_VALUE_ISINF (f1))
3252 /* Overflow plus exception. */
3253 return 0;
3255 /* Don't constant fold this floating point operation if the
3256 result may depend upon the run-time rounding mode and
3257 flag_rounding_math is set, or if GCC's software emulation
3258 is unable to accurately represent the result. */
3260 if ((flag_rounding_math
3261 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3262 && (inexact || !real_identical (&result, &value)))
3263 return NULL_RTX;
3265 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3269 /* We can fold some multi-word operations. */
3270 if (GET_MODE_CLASS (mode) == MODE_INT
3271 && width == HOST_BITS_PER_WIDE_INT * 2
3272 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3273 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3275 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3276 HOST_WIDE_INT h1, h2, hv, ht;
3278 if (GET_CODE (op0) == CONST_DOUBLE)
3279 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3280 else
3281 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3283 if (GET_CODE (op1) == CONST_DOUBLE)
3284 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3285 else
3286 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3288 switch (code)
3290 case MINUS:
3291 /* A - B == A + (-B). */
3292 neg_double (l2, h2, &lv, &hv);
3293 l2 = lv, h2 = hv;
3295 /* Fall through.... */
3297 case PLUS:
3298 add_double (l1, h1, l2, h2, &lv, &hv);
3299 break;
3301 case MULT:
3302 mul_double (l1, h1, l2, h2, &lv, &hv);
3303 break;
3305 case DIV:
3306 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3307 &lv, &hv, &lt, &ht))
3308 return 0;
3309 break;
3311 case MOD:
3312 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3313 &lt, &ht, &lv, &hv))
3314 return 0;
3315 break;
3317 case UDIV:
3318 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3319 &lv, &hv, &lt, &ht))
3320 return 0;
3321 break;
3323 case UMOD:
3324 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3325 &lt, &ht, &lv, &hv))
3326 return 0;
3327 break;
3329 case AND:
3330 lv = l1 & l2, hv = h1 & h2;
3331 break;
3333 case IOR:
3334 lv = l1 | l2, hv = h1 | h2;
3335 break;
3337 case XOR:
3338 lv = l1 ^ l2, hv = h1 ^ h2;
3339 break;
3341 case SMIN:
3342 if (h1 < h2
3343 || (h1 == h2
3344 && ((unsigned HOST_WIDE_INT) l1
3345 < (unsigned HOST_WIDE_INT) l2)))
3346 lv = l1, hv = h1;
3347 else
3348 lv = l2, hv = h2;
3349 break;
3351 case SMAX:
3352 if (h1 > h2
3353 || (h1 == h2
3354 && ((unsigned HOST_WIDE_INT) l1
3355 > (unsigned HOST_WIDE_INT) l2)))
3356 lv = l1, hv = h1;
3357 else
3358 lv = l2, hv = h2;
3359 break;
3361 case UMIN:
3362 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3363 || (h1 == h2
3364 && ((unsigned HOST_WIDE_INT) l1
3365 < (unsigned HOST_WIDE_INT) l2)))
3366 lv = l1, hv = h1;
3367 else
3368 lv = l2, hv = h2;
3369 break;
3371 case UMAX:
3372 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3373 || (h1 == h2
3374 && ((unsigned HOST_WIDE_INT) l1
3375 > (unsigned HOST_WIDE_INT) l2)))
3376 lv = l1, hv = h1;
3377 else
3378 lv = l2, hv = h2;
3379 break;
3381 case LSHIFTRT: case ASHIFTRT:
3382 case ASHIFT:
3383 case ROTATE: case ROTATERT:
3384 if (SHIFT_COUNT_TRUNCATED)
3385 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3387 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3388 return 0;
3390 if (code == LSHIFTRT || code == ASHIFTRT)
3391 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3392 code == ASHIFTRT);
3393 else if (code == ASHIFT)
3394 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3395 else if (code == ROTATE)
3396 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3397 else /* code == ROTATERT */
3398 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3399 break;
3401 default:
3402 return 0;
3405 return immed_double_const (lv, hv, mode);
3408 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3409 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3411 /* Get the integer argument values in two forms:
3412 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3414 arg0 = INTVAL (op0);
3415 arg1 = INTVAL (op1);
3417 if (width < HOST_BITS_PER_WIDE_INT)
3419 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3420 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3422 arg0s = arg0;
3423 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3424 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3426 arg1s = arg1;
3427 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3428 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3430 else
3432 arg0s = arg0;
3433 arg1s = arg1;
3436 /* Compute the value of the arithmetic. */
3438 switch (code)
3440 case PLUS:
3441 val = arg0s + arg1s;
3442 break;
3444 case MINUS:
3445 val = arg0s - arg1s;
3446 break;
3448 case MULT:
3449 val = arg0s * arg1s;
3450 break;
3452 case DIV:
3453 if (arg1s == 0
3454 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3455 && arg1s == -1))
3456 return 0;
3457 val = arg0s / arg1s;
3458 break;
3460 case MOD:
3461 if (arg1s == 0
3462 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3463 && arg1s == -1))
3464 return 0;
3465 val = arg0s % arg1s;
3466 break;
3468 case UDIV:
3469 if (arg1 == 0
3470 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3471 && arg1s == -1))
3472 return 0;
3473 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3474 break;
3476 case UMOD:
3477 if (arg1 == 0
3478 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3479 && arg1s == -1))
3480 return 0;
3481 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3482 break;
3484 case AND:
3485 val = arg0 & arg1;
3486 break;
3488 case IOR:
3489 val = arg0 | arg1;
3490 break;
3492 case XOR:
3493 val = arg0 ^ arg1;
3494 break;
3496 case LSHIFTRT:
3497 case ASHIFT:
3498 case ASHIFTRT:
3499 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3500 the value is in range. We can't return any old value for
3501 out-of-range arguments because either the middle-end (via
3502 shift_truncation_mask) or the back-end might be relying on
3503 target-specific knowledge. Nor can we rely on
3504 shift_truncation_mask, since the shift might not be part of an
3505 ashlM3, lshrM3 or ashrM3 instruction. */
3506 if (SHIFT_COUNT_TRUNCATED)
3507 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3508 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3509 return 0;
3511 val = (code == ASHIFT
3512 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3513 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3515 /* Sign-extend the result for arithmetic right shifts. */
3516 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3517 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3518 break;
3520 case ROTATERT:
3521 if (arg1 < 0)
3522 return 0;
3524 arg1 %= width;
3525 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3526 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3527 break;
3529 case ROTATE:
3530 if (arg1 < 0)
3531 return 0;
3533 arg1 %= width;
3534 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3535 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3536 break;
3538 case COMPARE:
3539 /* Do nothing here. */
3540 return 0;
3542 case SMIN:
3543 val = arg0s <= arg1s ? arg0s : arg1s;
3544 break;
3546 case UMIN:
3547 val = ((unsigned HOST_WIDE_INT) arg0
3548 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3549 break;
3551 case SMAX:
3552 val = arg0s > arg1s ? arg0s : arg1s;
3553 break;
3555 case UMAX:
3556 val = ((unsigned HOST_WIDE_INT) arg0
3557 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3558 break;
3560 case SS_PLUS:
3561 case US_PLUS:
3562 case SS_MINUS:
3563 case US_MINUS:
3564 case SS_MULT:
3565 case US_MULT:
3566 case SS_DIV:
3567 case US_DIV:
3568 case SS_ASHIFT:
3569 case US_ASHIFT:
3570 /* ??? There are simplifications that can be done. */
3571 return 0;
3573 default:
3574 gcc_unreachable ();
3577 return gen_int_mode (val, mode);
3580 return NULL_RTX;
3585 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3586 PLUS or MINUS.
3588 Rather than test for specific cases, we do this by a brute-force method
3589 and do all possible simplifications until no more changes occur. Then
3590 we rebuild the operation. */
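/* For example, (minus A (minus B C)) is decomposed into the signed
   operands +A, -B and +C, which are simplified pairwise where possible
   and then recombined into a PLUS/MINUS chain.  */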
3592 struct simplify_plus_minus_op_data
3594 rtx op;
3595 short neg;
3598 static bool
3599 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3601 int result;
3603 result = (commutative_operand_precedence (y)
3604 - commutative_operand_precedence (x));
3605 if (result)
3606 return result > 0;
3608 /* Group together equal REGs to do more simplification. */
3609 if (REG_P (x) && REG_P (y))
3610 return REGNO (x) > REGNO (y);
3611 else
3612 return false;
3615 static rtx
3616 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3617 rtx op1)
3619 struct simplify_plus_minus_op_data ops[8];
3620 rtx result, tem;
3621 int n_ops = 2, input_ops = 2;
3622 int changed, n_constants = 0, canonicalized = 0;
3623 int i, j;
3625 memset (ops, 0, sizeof ops);
3627 /* Set up the two operands and then expand them until nothing has been
3628 changed. If we run out of room in our array, give up; this should
3629 almost never happen. */
3631 ops[0].op = op0;
3632 ops[0].neg = 0;
3633 ops[1].op = op1;
3634 ops[1].neg = (code == MINUS);
3638 changed = 0;
3640 for (i = 0; i < n_ops; i++)
3642 rtx this_op = ops[i].op;
3643 int this_neg = ops[i].neg;
3644 enum rtx_code this_code = GET_CODE (this_op);
3646 switch (this_code)
3648 case PLUS:
3649 case MINUS:
3650 if (n_ops == 7)
3651 return NULL_RTX;
3653 ops[n_ops].op = XEXP (this_op, 1);
3654 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3655 n_ops++;
3657 ops[i].op = XEXP (this_op, 0);
3658 input_ops++;
3659 changed = 1;
3660 canonicalized |= this_neg;
3661 break;
3663 case NEG:
3664 ops[i].op = XEXP (this_op, 0);
3665 ops[i].neg = ! this_neg;
3666 changed = 1;
3667 canonicalized = 1;
3668 break;
3670 case CONST:
3671 if (n_ops < 7
3672 && GET_CODE (XEXP (this_op, 0)) == PLUS
3673 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3674 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3676 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3677 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3678 ops[n_ops].neg = this_neg;
3679 n_ops++;
3680 changed = 1;
3681 canonicalized = 1;
3683 break;
3685 case NOT:
3686 /* ~a -> (-a - 1) */
3687 if (n_ops != 7)
3689 ops[n_ops].op = constm1_rtx;
3690 ops[n_ops++].neg = this_neg;
3691 ops[i].op = XEXP (this_op, 0);
3692 ops[i].neg = !this_neg;
3693 changed = 1;
3694 canonicalized = 1;
3696 break;
3698 case CONST_INT:
3699 n_constants++;
3700 if (this_neg)
3702 ops[i].op = neg_const_int (mode, this_op);
3703 ops[i].neg = 0;
3704 changed = 1;
3705 canonicalized = 1;
3707 break;
3709 default:
3710 break;
3714 while (changed);
3716 if (n_constants > 1)
3717 canonicalized = 1;
3719 gcc_assert (n_ops >= 2);
3721 /* If we only have two operands, we can avoid the loops. */
3722 if (n_ops == 2)
3724 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3725 rtx lhs, rhs;
3727 /* Get the two operands. Be careful with the order, especially for
3728 the cases where code == MINUS. */
3729 if (ops[0].neg && ops[1].neg)
3731 lhs = gen_rtx_NEG (mode, ops[0].op);
3732 rhs = ops[1].op;
3734 else if (ops[0].neg)
3736 lhs = ops[1].op;
3737 rhs = ops[0].op;
3739 else
3741 lhs = ops[0].op;
3742 rhs = ops[1].op;
3745 return simplify_const_binary_operation (code, mode, lhs, rhs);
3748 /* Now simplify each pair of operands until nothing changes. */
3751 /* Insertion sort is good enough for an eight-element array. */
3752 for (i = 1; i < n_ops; i++)
3754 struct simplify_plus_minus_op_data save;
3755 j = i - 1;
3756 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3757 continue;
3759 canonicalized = 1;
3760 save = ops[i];
3762 ops[j + 1] = ops[j];
3763 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3764 ops[j + 1] = save;
3767 changed = 0;
3768 for (i = n_ops - 1; i > 0; i--)
3769 for (j = i - 1; j >= 0; j--)
3771 rtx lhs = ops[j].op, rhs = ops[i].op;
3772 int lneg = ops[j].neg, rneg = ops[i].neg;
3774 if (lhs != 0 && rhs != 0)
3776 enum rtx_code ncode = PLUS;
3778 if (lneg != rneg)
3780 ncode = MINUS;
3781 if (lneg)
3782 tem = lhs, lhs = rhs, rhs = tem;
3784 else if (swap_commutative_operands_p (lhs, rhs))
3785 tem = lhs, lhs = rhs, rhs = tem;
3787 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3788 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3790 rtx tem_lhs, tem_rhs;
3792 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3793 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3794 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3796 if (tem && !CONSTANT_P (tem))
3797 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3799 else
3800 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3802 /* Reject "simplifications" that just wrap the two
3803 arguments in a CONST. Failure to do so can result
3804 in infinite recursion with simplify_binary_operation
3805 when it calls us to simplify CONST operations. */
3806 if (tem
3807 && ! (GET_CODE (tem) == CONST
3808 && GET_CODE (XEXP (tem, 0)) == ncode
3809 && XEXP (XEXP (tem, 0), 0) == lhs
3810 && XEXP (XEXP (tem, 0), 1) == rhs))
3812 lneg &= rneg;
3813 if (GET_CODE (tem) == NEG)
3814 tem = XEXP (tem, 0), lneg = !lneg;
3815 if (CONST_INT_P (tem) && lneg)
3816 tem = neg_const_int (mode, tem), lneg = 0;
3818 ops[i].op = tem;
3819 ops[i].neg = lneg;
3820 ops[j].op = NULL_RTX;
3821 changed = 1;
3822 canonicalized = 1;
3827 /* If nothing changed, fail. */
3828 if (!canonicalized)
3829 return NULL_RTX;
3831 /* Pack all the operands to the lower-numbered entries. */
3832 for (i = 0, j = 0; j < n_ops; j++)
3833 if (ops[j].op)
3835 ops[i] = ops[j];
3836 i++;
3838 n_ops = i;
3840 while (changed);
3842 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3843 if (n_ops == 2
3844 && CONST_INT_P (ops[1].op)
3845 && CONSTANT_P (ops[0].op)
3846 && ops[0].neg)
3847 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3849 /* We suppressed creation of trivial CONST expressions in the
3850 combination loop to avoid recursion. Create one manually now.
3851 The combination loop should have ensured that there is exactly
3852 one CONST_INT, and the sort will have ensured that it is last
3853 in the array and that any other constant will be next-to-last. */
3855 if (n_ops > 1
3856 && CONST_INT_P (ops[n_ops - 1].op)
3857 && CONSTANT_P (ops[n_ops - 2].op))
3859 rtx value = ops[n_ops - 1].op;
3860 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3861 value = neg_const_int (mode, value);
3862 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3863 n_ops--;
3866 /* Put a non-negated operand first, if possible. */
3868 for (i = 0; i < n_ops && ops[i].neg; i++)
3869 continue;
3870 if (i == n_ops)
3871 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3872 else if (i != 0)
3874 tem = ops[0].op;
3875 ops[0] = ops[i];
3876 ops[i].op = tem;
3877 ops[i].neg = 1;
3880 /* Now make the result by performing the requested operations. */
3881 result = ops[0].op;
3882 for (i = 1; i < n_ops; i++)
3883 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3884 mode, result, ops[i].op);
3886 return result;
3889 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3890 static bool
3891 plus_minus_operand_p (const_rtx x)
3893 return GET_CODE (x) == PLUS
3894 || GET_CODE (x) == MINUS
3895 || (GET_CODE (x) == CONST
3896 && GET_CODE (XEXP (x, 0)) == PLUS
3897 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3898 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3901 /* Like simplify_binary_operation except used for relational operators.
3902 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3903 not both be VOIDmode.
3905 CMP_MODE specifies the mode in which the comparison is done, so it is
3906 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3907 the operands or, if both are VOIDmode, the operands are compared in
3908 "infinite precision". */
3910 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3911 enum machine_mode cmp_mode, rtx op0, rtx op1)
3913 rtx tem, trueop0, trueop1;
3915 if (cmp_mode == VOIDmode)
3916 cmp_mode = GET_MODE (op0);
3917 if (cmp_mode == VOIDmode)
3918 cmp_mode = GET_MODE (op1);
3920 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3921 if (tem)
3923 if (SCALAR_FLOAT_MODE_P (mode))
3925 if (tem == const0_rtx)
3926 return CONST0_RTX (mode);
3927 #ifdef FLOAT_STORE_FLAG_VALUE
3929 REAL_VALUE_TYPE val;
3930 val = FLOAT_STORE_FLAG_VALUE (mode);
3931 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3933 #else
3934 return NULL_RTX;
3935 #endif
3937 if (VECTOR_MODE_P (mode))
3939 if (tem == const0_rtx)
3940 return CONST0_RTX (mode);
3941 #ifdef VECTOR_STORE_FLAG_VALUE
3943 int i, units;
3944 rtvec v;
3946 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3947 if (val == NULL_RTX)
3948 return NULL_RTX;
3949 if (val == const1_rtx)
3950 return CONST1_RTX (mode);
3952 units = GET_MODE_NUNITS (mode);
3953 v = rtvec_alloc (units);
3954 for (i = 0; i < units; i++)
3955 RTVEC_ELT (v, i) = val;
3956 return gen_rtx_raw_CONST_VECTOR (mode, v);
3958 #else
3959 return NULL_RTX;
3960 #endif
3963 return tem;
3966 /* For the following tests, ensure const0_rtx is op1. */
3967 if (swap_commutative_operands_p (op0, op1)
3968 || (op0 == const0_rtx && op1 != const0_rtx))
3969 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3971 /* If op0 is a compare, extract the comparison arguments from it. */
3972 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3973 return simplify_gen_relational (code, mode, VOIDmode,
3974 XEXP (op0, 0), XEXP (op0, 1));
3976 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3977 || CC0_P (op0))
3978 return NULL_RTX;
3980 trueop0 = avoid_constant_pool_reference (op0);
3981 trueop1 = avoid_constant_pool_reference (op1);
3982 return simplify_relational_operation_1 (code, mode, cmp_mode,
3983 trueop0, trueop1);
3986 /* This part of simplify_relational_operation is only used when CMP_MODE
3987 is not in class MODE_CC (i.e. it is a real comparison).
3989 MODE is the mode of the result, while CMP_MODE specifies the mode
3990 in which the comparison is done, so it is the mode of the operands. */
3992 static rtx
3993 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3994 enum machine_mode cmp_mode, rtx op0, rtx op1)
3996 enum rtx_code op0code = GET_CODE (op0);
3998 if (op1 == const0_rtx && COMPARISON_P (op0))
4000 /* If op0 is a comparison, extract the comparison arguments
4001 from it. */
4002 if (code == NE)
4004 if (GET_MODE (op0) == mode)
4005 return simplify_rtx (op0);
4006 else
4007 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4008 XEXP (op0, 0), XEXP (op0, 1));
4010 else if (code == EQ)
4012 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4013 if (new_code != UNKNOWN)
4014 return simplify_gen_relational (new_code, mode, VOIDmode,
4015 XEXP (op0, 0), XEXP (op0, 1));
4019 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4020 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4021 if ((code == LTU || code == GEU)
4022 && GET_CODE (op0) == PLUS
4023 && CONST_INT_P (XEXP (op0, 1))
4024 && (rtx_equal_p (op1, XEXP (op0, 0))
4025 || rtx_equal_p (op1, XEXP (op0, 1))))
4027 rtx new_cmp
4028 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4029 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4030 cmp_mode, XEXP (op0, 0), new_cmp);
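/* For instance (with an arbitrarily chosen pseudo register), the
   unsigned overflow-test idiom
     (ltu:SI (plus:SI (reg:SI 100) (const_int -4)) (const_int -4))
   is rewritten by the code above into
     (geu:SI (reg:SI 100) (const_int 4)). */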
4033 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4034 if ((code == LTU || code == GEU)
4035 && GET_CODE (op0) == PLUS
4036 && rtx_equal_p (op1, XEXP (op0, 1))
4037 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4038 && !rtx_equal_p (op1, XEXP (op0, 0)))
4039 return simplify_gen_relational (code, mode, cmp_mode, op0,
4040 copy_rtx (XEXP (op0, 0)));
4042 if (op1 == const0_rtx)
4044 /* Canonicalize (GTU x 0) as (NE x 0). */
4045 if (code == GTU)
4046 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4047 /* Canonicalize (LEU x 0) as (EQ x 0). */
4048 if (code == LEU)
4049 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4051 else if (op1 == const1_rtx)
4053 switch (code)
4055 case GE:
4056 /* Canonicalize (GE x 1) as (GT x 0). */
4057 return simplify_gen_relational (GT, mode, cmp_mode,
4058 op0, const0_rtx);
4059 case GEU:
4060 /* Canonicalize (GEU x 1) as (NE x 0). */
4061 return simplify_gen_relational (NE, mode, cmp_mode,
4062 op0, const0_rtx);
4063 case LT:
4064 /* Canonicalize (LT x 1) as (LE x 0). */
4065 return simplify_gen_relational (LE, mode, cmp_mode,
4066 op0, const0_rtx);
4067 case LTU:
4068 /* Canonicalize (LTU x 1) as (EQ x 0). */
4069 return simplify_gen_relational (EQ, mode, cmp_mode,
4070 op0, const0_rtx);
4071 default:
4072 break;
4075 else if (op1 == constm1_rtx)
4077 /* Canonicalize (LE x -1) as (LT x 0). */
4078 if (code == LE)
4079 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4080 /* Canonicalize (GT x -1) as (GE x 0). */
4081 if (code == GT)
4082 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4085 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4086 if ((code == EQ || code == NE)
4087 && (op0code == PLUS || op0code == MINUS)
4088 && CONSTANT_P (op1)
4089 && CONSTANT_P (XEXP (op0, 1))
4090 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4092 rtx x = XEXP (op0, 0);
4093 rtx c = XEXP (op0, 1);
4095 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4096 cmp_mode, op1, c);
4097 return simplify_gen_relational (code, mode, cmp_mode, x, c);
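/* A small worked example of the rule above:
   (eq:SI (plus:SI (reg:SI 100) (const_int 7)) (const_int 10))
   becomes (eq:SI (reg:SI 100) (const_int 3)), since 10 - 7 = 3
   (the register number is arbitrary and only for illustration). */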
4100 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4101 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4102 if (code == NE
4103 && op1 == const0_rtx
4104 && GET_MODE_CLASS (mode) == MODE_INT
4105 && cmp_mode != VOIDmode
4106 /* ??? Work-around BImode bugs in the ia64 backend. */
4107 && mode != BImode
4108 && cmp_mode != BImode
4109 && nonzero_bits (op0, cmp_mode) == 1
4110 && STORE_FLAG_VALUE == 1)
4111 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4112 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4113 : lowpart_subreg (mode, op0, cmp_mode);
4115 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4116 if ((code == EQ || code == NE)
4117 && op1 == const0_rtx
4118 && op0code == XOR)
4119 return simplify_gen_relational (code, mode, cmp_mode,
4120 XEXP (op0, 0), XEXP (op0, 1));
4122 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4123 if ((code == EQ || code == NE)
4124 && op0code == XOR
4125 && rtx_equal_p (XEXP (op0, 0), op1)
4126 && !side_effects_p (XEXP (op0, 0)))
4127 return simplify_gen_relational (code, mode, cmp_mode,
4128 XEXP (op0, 1), const0_rtx);
4130 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4131 if ((code == EQ || code == NE)
4132 && op0code == XOR
4133 && rtx_equal_p (XEXP (op0, 1), op1)
4134 && !side_effects_p (XEXP (op0, 1)))
4135 return simplify_gen_relational (code, mode, cmp_mode,
4136 XEXP (op0, 0), const0_rtx);
4138 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4139 if ((code == EQ || code == NE)
4140 && op0code == XOR
4141 && (CONST_INT_P (op1)
4142 || GET_CODE (op1) == CONST_DOUBLE)
4143 && (CONST_INT_P (XEXP (op0, 1))
4144 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4145 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4146 simplify_gen_binary (XOR, cmp_mode,
4147 XEXP (op0, 1), op1));
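/* For example, (eq:SI (xor:SI (reg:SI 100) (const_int 5)) (const_int 3))
   becomes (eq:SI (reg:SI 100) (const_int 6)), since 5 ^ 3 = 6
   (register number chosen only for illustration). */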
4149 if (op0code == POPCOUNT && op1 == const0_rtx)
4150 switch (code)
4152 case EQ:
4153 case LE:
4154 case LEU:
4155 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4156 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4157 XEXP (op0, 0), const0_rtx);
4159 case NE:
4160 case GT:
4161 case GTU:
4162 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4163 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4164 XEXP (op0, 0), const0_rtx);
4166 default:
4167 break;
4170 return NULL_RTX;
4173 enum
4175 CMP_EQ = 1,
4176 CMP_LT = 2,
4177 CMP_GT = 4,
4178 CMP_LTU = 8,
4179 CMP_GTU = 16
4183 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4184 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4185 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4186 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4187 For floating-point comparisons, assume that the operands were ordered. */
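/* For example, when simplify_const_relational_operation below compares
   the integer constants -1 and 1, it passes CMP_LT | CMP_GTU here, since
   -1 is smaller as a signed value but larger as an unsigned value; LT
   and GTU then yield const_true_rtx, while EQ, GE and LEU yield
   const0_rtx. */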
4189 static rtx
4190 comparison_result (enum rtx_code code, int known_results)
4192 switch (code)
4194 case EQ:
4195 case UNEQ:
4196 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4197 case NE:
4198 case LTGT:
4199 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4201 case LT:
4202 case UNLT:
4203 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4204 case GE:
4205 case UNGE:
4206 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4208 case GT:
4209 case UNGT:
4210 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4211 case LE:
4212 case UNLE:
4213 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4215 case LTU:
4216 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4217 case GEU:
4218 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4220 case GTU:
4221 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4222 case LEU:
4223 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4225 case ORDERED:
4226 return const_true_rtx;
4227 case UNORDERED:
4228 return const0_rtx;
4229 default:
4230 gcc_unreachable ();
4234 /* Check if the given comparison (done in the given MODE) is actually a
4235 tautology or a contradiction.
4236 If no simplification is possible, this function returns zero.
4237 Otherwise, it returns either const_true_rtx or const0_rtx. */
4240 simplify_const_relational_operation (enum rtx_code code,
4241 enum machine_mode mode,
4242 rtx op0, rtx op1)
4244 rtx tem;
4245 rtx trueop0;
4246 rtx trueop1;
4248 gcc_assert (mode != VOIDmode
4249 || (GET_MODE (op0) == VOIDmode
4250 && GET_MODE (op1) == VOIDmode));
4252 /* If op0 is a compare, extract the comparison arguments from it. */
4253 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4255 op1 = XEXP (op0, 1);
4256 op0 = XEXP (op0, 0);
4258 if (GET_MODE (op0) != VOIDmode)
4259 mode = GET_MODE (op0);
4260 else if (GET_MODE (op1) != VOIDmode)
4261 mode = GET_MODE (op1);
4262 else
4263 return 0;
4266 /* We can't simplify MODE_CC values since we don't know what the
4267 actual comparison is. */
4268 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4269 return 0;
4271 /* Make sure the constant is second. */
4272 if (swap_commutative_operands_p (op0, op1))
4274 tem = op0, op0 = op1, op1 = tem;
4275 code = swap_condition (code);
4278 trueop0 = avoid_constant_pool_reference (op0);
4279 trueop1 = avoid_constant_pool_reference (op1);
4281 /* For integer comparisons of A and B maybe we can simplify A - B and can
4282 then simplify a comparison of that with zero. If A and B are both either
4283 a register or a CONST_INT, this can't help; testing for these cases will
4284 prevent infinite recursion here and speed things up.
4286 We can only do this for EQ and NE comparisons as otherwise we may
4287 lose or introduce overflow, which we cannot disregard as undefined since
4288 we do not know the signedness of the operation on either the left or
4289 the right hand side of the comparison. */
4291 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4292 && (code == EQ || code == NE)
4293 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4294 && (REG_P (op1) || CONST_INT_P (trueop1)))
4295 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4296 /* We cannot do this if tem is a nonzero address. */
4297 && ! nonzero_address_p (tem))
4298 return simplify_const_relational_operation (signed_condition (code),
4299 mode, tem, const0_rtx);
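/* For instance (pseudo register chosen only for illustration),
   (ne:SI (plus:SI (reg:SI 100) (const_int 1)) (reg:SI 100)) has the
   difference (const_int 1), so the recursive call above reduces it to
   (ne:SI (const_int 1) (const_int 0)) and the comparison folds to
   const_true_rtx. */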
4301 if (! HONOR_NANS (mode) && code == ORDERED)
4302 return const_true_rtx;
4304 if (! HONOR_NANS (mode) && code == UNORDERED)
4305 return const0_rtx;
4307 /* For modes without NaNs, if the two operands are equal, we know the
4308 result except if they have side-effects. Even with NaNs we know
4309 the result of unordered comparisons and, if signaling NaNs are
4310 irrelevant, also the result of LT/GT/LTGT. */
4311 if ((! HONOR_NANS (GET_MODE (trueop0))
4312 || code == UNEQ || code == UNLE || code == UNGE
4313 || ((code == LT || code == GT || code == LTGT)
4314 && ! HONOR_SNANS (GET_MODE (trueop0))))
4315 && rtx_equal_p (trueop0, trueop1)
4316 && ! side_effects_p (trueop0))
4317 return comparison_result (code, CMP_EQ);
4319 /* If the operands are floating-point constants, see if we can fold
4320 the result. */
4321 if (GET_CODE (trueop0) == CONST_DOUBLE
4322 && GET_CODE (trueop1) == CONST_DOUBLE
4323 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4325 REAL_VALUE_TYPE d0, d1;
4327 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4328 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4330 /* Comparisons are unordered iff at least one of the values is NaN. */
4331 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4332 switch (code)
4334 case UNEQ:
4335 case UNLT:
4336 case UNGT:
4337 case UNLE:
4338 case UNGE:
4339 case NE:
4340 case UNORDERED:
4341 return const_true_rtx;
4342 case EQ:
4343 case LT:
4344 case GT:
4345 case LE:
4346 case GE:
4347 case LTGT:
4348 case ORDERED:
4349 return const0_rtx;
4350 default:
4351 return 0;
4354 return comparison_result (code,
4355 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4356 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4359 /* Otherwise, see if the operands are both integers. */
4360 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4361 && (GET_CODE (trueop0) == CONST_DOUBLE
4362 || CONST_INT_P (trueop0))
4363 && (GET_CODE (trueop1) == CONST_DOUBLE
4364 || CONST_INT_P (trueop1)))
4366 int width = GET_MODE_BITSIZE (mode);
4367 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4368 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4370 /* Get the two words comprising each integer constant. */
4371 if (GET_CODE (trueop0) == CONST_DOUBLE)
4373 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4374 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4376 else
4378 l0u = l0s = INTVAL (trueop0);
4379 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4382 if (GET_CODE (trueop1) == CONST_DOUBLE)
4384 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4385 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4387 else
4389 l1u = l1s = INTVAL (trueop1);
4390 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4393 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4394 we have to sign or zero-extend the values. */
4395 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4397 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4398 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4400 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4401 l0s |= ((HOST_WIDE_INT) (-1) << width);
4403 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4404 l1s |= ((HOST_WIDE_INT) (-1) << width);
4406 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4407 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4409 if (h0u == h1u && l0u == l1u)
4410 return comparison_result (code, CMP_EQ);
4411 else
4413 int cr;
4414 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4415 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4416 return comparison_result (code, cr);
4420 /* Optimize comparisons with upper and lower bounds. */
4421 if (SCALAR_INT_MODE_P (mode)
4422 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4423 && CONST_INT_P (trueop1))
4425 int sign;
4426 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4427 HOST_WIDE_INT val = INTVAL (trueop1);
4428 HOST_WIDE_INT mmin, mmax;
4430 if (code == GEU
4431 || code == LEU
4432 || code == GTU
4433 || code == LTU)
4434 sign = 0;
4435 else
4436 sign = 1;
4438 /* Get a reduced range if the sign bit is zero. */
4439 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4441 mmin = 0;
4442 mmax = nonzero;
4444 else
4446 rtx mmin_rtx, mmax_rtx;
4447 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4449 mmin = INTVAL (mmin_rtx);
4450 mmax = INTVAL (mmax_rtx);
4451 if (sign)
4453 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4455 mmin >>= (sign_copies - 1);
4456 mmax >>= (sign_copies - 1);
4460 switch (code)
4462 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4463 case GEU:
4464 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4465 return const_true_rtx;
4466 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4467 return const0_rtx;
4468 break;
4469 case GE:
4470 if (val <= mmin)
4471 return const_true_rtx;
4472 if (val > mmax)
4473 return const0_rtx;
4474 break;
4476 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4477 case LEU:
4478 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4479 return const_true_rtx;
4480 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4481 return const0_rtx;
4482 break;
4483 case LE:
4484 if (val >= mmax)
4485 return const_true_rtx;
4486 if (val < mmin)
4487 return const0_rtx;
4488 break;
4490 case EQ:
4491 /* x == y is always false for y out of range. */
4492 if (val < mmin || val > mmax)
4493 return const0_rtx;
4494 break;
4496 /* x > y is always false for y >= mmax, always true for y < mmin. */
4497 case GTU:
4498 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4499 return const0_rtx;
4500 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4501 return const_true_rtx;
4502 break;
4503 case GT:
4504 if (val >= mmax)
4505 return const0_rtx;
4506 if (val < mmin)
4507 return const_true_rtx;
4508 break;
4510 /* x < y is always false for y <= mmin, always true for y > mmax. */
4511 case LTU:
4512 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4513 return const0_rtx;
4514 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4515 return const_true_rtx;
4516 break;
4517 case LT:
4518 if (val <= mmin)
4519 return const0_rtx;
4520 if (val > mmax)
4521 return const_true_rtx;
4522 break;
4524 case NE:
4525 /* x != y is always true for y out of range. */
4526 if (val < mmin || val > mmax)
4527 return const_true_rtx;
4528 break;
4530 default:
4531 break;
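/* As an illustration of the bounds test above: if nonzero_bits reports
   that only the low eight bits of TRUEOP0 can be set in SImode, then
   MMIN and MMAX are 0 and 255, so (gtu:SI x (const_int 255)) folds to
   const0_rtx and (leu:SI x (const_int 255)) folds to const_true_rtx. */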
4535 /* Optimize integer comparisons with zero. */
4536 if (trueop1 == const0_rtx)
4538 /* Some addresses are known to be nonzero. We don't know
4539 their sign, but equality comparisons are known. */
4540 if (nonzero_address_p (trueop0))
4542 if (code == EQ || code == LEU)
4543 return const0_rtx;
4544 if (code == NE || code == GTU)
4545 return const_true_rtx;
4548 /* See if the first operand is an IOR with a constant. If so, we
4549 may be able to determine the result of this comparison. */
4550 if (GET_CODE (op0) == IOR)
4552 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4553 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4555 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4556 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4557 && (INTVAL (inner_const)
4558 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4560 switch (code)
4562 case EQ:
4563 case LEU:
4564 return const0_rtx;
4565 case NE:
4566 case GTU:
4567 return const_true_rtx;
4568 case LT:
4569 case LE:
4570 if (has_sign)
4571 return const_true_rtx;
4572 break;
4573 case GT:
4574 case GE:
4575 if (has_sign)
4576 return const0_rtx;
4577 break;
4578 default:
4579 break;
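/* For example, (ne:SI (ior:SI (reg:SI 100) (const_int 4)) (const_int 0))
   is known to be true, and since (const_int -16) has the SImode sign bit
   set, (lt:SI (ior:SI (reg:SI 100) (const_int -16)) (const_int 0)) is
   known to be true as well (register number chosen for illustration). */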
4585 /* Optimize comparison of ABS with zero. */
4586 if (trueop1 == CONST0_RTX (mode)
4587 && (GET_CODE (trueop0) == ABS
4588 || (GET_CODE (trueop0) == FLOAT_EXTEND
4589 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4591 switch (code)
4593 case LT:
4594 /* Optimize abs(x) < 0.0. */
4595 if (!HONOR_SNANS (mode)
4596 && (!INTEGRAL_MODE_P (mode)
4597 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4599 if (INTEGRAL_MODE_P (mode)
4600 && (issue_strict_overflow_warning
4601 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4602 warning (OPT_Wstrict_overflow,
4603 ("assuming signed overflow does not occur when "
4604 "assuming abs (x) < 0 is false"));
4605 return const0_rtx;
4607 break;
4609 case GE:
4610 /* Optimize abs(x) >= 0.0. */
4611 if (!HONOR_NANS (mode)
4612 && (!INTEGRAL_MODE_P (mode)
4613 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4615 if (INTEGRAL_MODE_P (mode)
4616 && (issue_strict_overflow_warning
4617 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4618 warning (OPT_Wstrict_overflow,
4619 ("assuming signed overflow does not occur when "
4620 "assuming abs (x) >= 0 is true"));
4621 return const_true_rtx;
4623 break;
4625 case UNGE:
4626 /* Optimize ! (abs(x) < 0.0). */
4627 return const_true_rtx;
4629 default:
4630 break;
4634 return 0;
4637 /* Simplify CODE, an operation with result mode MODE and three operands,
4638 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4639 a constant. Return 0 if no simplification is possible. */
4642 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4643 enum machine_mode op0_mode, rtx op0, rtx op1,
4644 rtx op2)
4646 unsigned int width = GET_MODE_BITSIZE (mode);
4648 /* VOIDmode means "infinite" precision. */
4649 if (width == 0)
4650 width = HOST_BITS_PER_WIDE_INT;
4652 switch (code)
4654 case SIGN_EXTRACT:
4655 case ZERO_EXTRACT:
4656 if (CONST_INT_P (op0)
4657 && CONST_INT_P (op1)
4658 && CONST_INT_P (op2)
4659 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4660 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4662 /* Extracting a bit-field from a constant */
4663 HOST_WIDE_INT val = INTVAL (op0);
4665 if (BITS_BIG_ENDIAN)
4666 val >>= (GET_MODE_BITSIZE (op0_mode)
4667 - INTVAL (op2) - INTVAL (op1));
4668 else
4669 val >>= INTVAL (op2);
4671 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4673 /* First zero-extend. */
4674 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4675 /* If desired, propagate sign bit. */
4676 if (code == SIGN_EXTRACT
4677 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4678 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4681 /* Clear the bits that don't belong in our mode,
4682 unless they and our sign bit are all one.
4683 So we get either a reasonable negative value or a reasonable
4684 unsigned value for this mode. */
4685 if (width < HOST_BITS_PER_WIDE_INT
4686 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4687 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4688 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4690 return gen_int_mode (val, mode);
4692 break;
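/* A worked example of the extraction above, assuming !BITS_BIG_ENDIAN:
   (zero_extract:SI (const_int 90) (const_int 4) (const_int 1)) shifts
   0x5a right by one bit and masks to four bits, giving (const_int 13);
   the corresponding SIGN_EXTRACT propagates bit 3 and gives
   (const_int -3). */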
4694 case IF_THEN_ELSE:
4695 if (CONST_INT_P (op0))
4696 return op0 != const0_rtx ? op1 : op2;
4698 /* Convert c ? a : a into "a". */
4699 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4700 return op1;
4702 /* Convert a != b ? a : b into "a". */
4703 if (GET_CODE (op0) == NE
4704 && ! side_effects_p (op0)
4705 && ! HONOR_NANS (mode)
4706 && ! HONOR_SIGNED_ZEROS (mode)
4707 && ((rtx_equal_p (XEXP (op0, 0), op1)
4708 && rtx_equal_p (XEXP (op0, 1), op2))
4709 || (rtx_equal_p (XEXP (op0, 0), op2)
4710 && rtx_equal_p (XEXP (op0, 1), op1))))
4711 return op1;
4713 /* Convert a == b ? a : b into "b". */
4714 if (GET_CODE (op0) == EQ
4715 && ! side_effects_p (op0)
4716 && ! HONOR_NANS (mode)
4717 && ! HONOR_SIGNED_ZEROS (mode)
4718 && ((rtx_equal_p (XEXP (op0, 0), op1)
4719 && rtx_equal_p (XEXP (op0, 1), op2))
4720 || (rtx_equal_p (XEXP (op0, 0), op2)
4721 && rtx_equal_p (XEXP (op0, 1), op1))))
4722 return op2;
4724 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4726 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4727 ? GET_MODE (XEXP (op0, 1))
4728 : GET_MODE (XEXP (op0, 0)));
4729 rtx temp;
4731 /* Look for happy constants in op1 and op2. */
4732 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4734 HOST_WIDE_INT t = INTVAL (op1);
4735 HOST_WIDE_INT f = INTVAL (op2);
4737 if (t == STORE_FLAG_VALUE && f == 0)
4738 code = GET_CODE (op0);
4739 else if (t == 0 && f == STORE_FLAG_VALUE)
4741 enum rtx_code tmp;
4742 tmp = reversed_comparison_code (op0, NULL_RTX);
4743 if (tmp == UNKNOWN)
4744 break;
4745 code = tmp;
4747 else
4748 break;
4750 return simplify_gen_relational (code, mode, cmp_mode,
4751 XEXP (op0, 0), XEXP (op0, 1));
4754 if (cmp_mode == VOIDmode)
4755 cmp_mode = op0_mode;
4756 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4757 cmp_mode, XEXP (op0, 0),
4758 XEXP (op0, 1));
4760 /* See if any simplifications were possible. */
4761 if (temp)
4763 if (CONST_INT_P (temp))
4764 return temp == const0_rtx ? op2 : op1;
4765 else if (temp)
4766 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4769 break;
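/* For example, on a target where STORE_FLAG_VALUE == 1,
   (if_then_else:SI (lt:SI (reg:SI 100) (reg:SI 101))
                    (const_int 1) (const_int 0))
   becomes (lt:SI (reg:SI 100) (reg:SI 101)); with the two constants
   swapped it becomes the reversed comparison
   (ge:SI (reg:SI 100) (reg:SI 101)), provided the reversed code can be
   determined (register numbers chosen only for illustration). */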
4771 case VEC_MERGE:
4772 gcc_assert (GET_MODE (op0) == mode);
4773 gcc_assert (GET_MODE (op1) == mode);
4774 gcc_assert (VECTOR_MODE_P (mode));
4775 op2 = avoid_constant_pool_reference (op2);
4776 if (CONST_INT_P (op2))
4778 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4779 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4780 int mask = (1 << n_elts) - 1;
4782 if (!(INTVAL (op2) & mask))
4783 return op1;
4784 if ((INTVAL (op2) & mask) == mask)
4785 return op0;
4787 op0 = avoid_constant_pool_reference (op0);
4788 op1 = avoid_constant_pool_reference (op1);
4789 if (GET_CODE (op0) == CONST_VECTOR
4790 && GET_CODE (op1) == CONST_VECTOR)
4792 rtvec v = rtvec_alloc (n_elts);
4793 unsigned int i;
4795 for (i = 0; i < n_elts; i++)
4796 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4797 ? CONST_VECTOR_ELT (op0, i)
4798 : CONST_VECTOR_ELT (op1, i));
4799 return gen_rtx_CONST_VECTOR (mode, v);
4802 break;
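/* For instance, in V4SImode a mask of (const_int 5) (binary 0101) merges
   two constant vectors by taking elements 0 and 2 from OP0 and elements
   1 and 3 from OP1; a mask of (const_int 0) selects OP1 outright and
   (const_int 15) selects OP0. */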
4804 default:
4805 gcc_unreachable ();
4808 return 0;
4811 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4812 or CONST_VECTOR,
4813 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4815 Works by unpacking OP into a collection of 8-bit values
4816 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4817 and then repacking them again for OUTERMODE. */
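/* As an illustration, on a little-endian target a SUBREG:SI of the
   DImode constant 0x100000002 yields (const_int 2) at byte offset 0 and
   (const_int 1) at byte offset 4: the value is unpacked into bytes, the
   requested bytes are selected, and the result is repacked as an SImode
   integer. */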
4819 static rtx
4820 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4821 enum machine_mode innermode, unsigned int byte)
4823 /* We support up to 512-bit values (for V8DFmode). */
4824 enum {
4825 max_bitsize = 512,
4826 value_bit = 8,
4827 value_mask = (1 << value_bit) - 1
4829 unsigned char value[max_bitsize / value_bit];
4830 int value_start;
4831 int i;
4832 int elem;
4834 int num_elem;
4835 rtx * elems;
4836 int elem_bitsize;
4837 rtx result_s;
4838 rtvec result_v = NULL;
4839 enum mode_class outer_class;
4840 enum machine_mode outer_submode;
4842 /* Some ports misuse CCmode. */
4843 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4844 return op;
4846 /* We have no way to represent a complex constant at the rtl level. */
4847 if (COMPLEX_MODE_P (outermode))
4848 return NULL_RTX;
4850 /* Unpack the value. */
4852 if (GET_CODE (op) == CONST_VECTOR)
4854 num_elem = CONST_VECTOR_NUNITS (op);
4855 elems = &CONST_VECTOR_ELT (op, 0);
4856 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4858 else
4860 num_elem = 1;
4861 elems = &op;
4862 elem_bitsize = max_bitsize;
4864 /* If this asserts, it is too complicated; reducing value_bit may help. */
4865 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4866 /* I don't know how to handle endianness of sub-units. */
4867 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4869 for (elem = 0; elem < num_elem; elem++)
4871 unsigned char * vp;
4872 rtx el = elems[elem];
4874 /* Vectors are kept in target memory order. (This is probably
4875 a mistake.) */
4877 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4878 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4879 / BITS_PER_UNIT);
4880 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4881 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4882 unsigned bytele = (subword_byte % UNITS_PER_WORD
4883 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4884 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4887 switch (GET_CODE (el))
4889 case CONST_INT:
4890 for (i = 0;
4891 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4892 i += value_bit)
4893 *vp++ = INTVAL (el) >> i;
4894 /* CONST_INTs are always logically sign-extended. */
4895 for (; i < elem_bitsize; i += value_bit)
4896 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4897 break;
4899 case CONST_DOUBLE:
4900 if (GET_MODE (el) == VOIDmode)
4902 /* If this triggers, someone should have generated a
4903 CONST_INT instead. */
4904 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4906 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4907 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4908 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4910 *vp++
4911 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4912 i += value_bit;
4914 /* It shouldn't matter what's done here, so fill it with
4915 zero. */
4916 for (; i < elem_bitsize; i += value_bit)
4917 *vp++ = 0;
4919 else
4921 long tmp[max_bitsize / 32];
4922 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4924 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4925 gcc_assert (bitsize <= elem_bitsize);
4926 gcc_assert (bitsize % value_bit == 0);
4928 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4929 GET_MODE (el));
4931 /* real_to_target produces its result in words affected by
4932 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4933 and use WORDS_BIG_ENDIAN instead; see the documentation
4934 of SUBREG in rtl.texi. */
4935 for (i = 0; i < bitsize; i += value_bit)
4937 int ibase;
4938 if (WORDS_BIG_ENDIAN)
4939 ibase = bitsize - 1 - i;
4940 else
4941 ibase = i;
4942 *vp++ = tmp[ibase / 32] >> i % 32;
4945 /* It shouldn't matter what's done here, so fill it with
4946 zero. */
4947 for (; i < elem_bitsize; i += value_bit)
4948 *vp++ = 0;
4950 break;
4952 case CONST_FIXED:
4953 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4955 for (i = 0; i < elem_bitsize; i += value_bit)
4956 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4958 else
4960 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4961 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4962 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4963 i += value_bit)
4964 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4965 >> (i - HOST_BITS_PER_WIDE_INT);
4966 for (; i < elem_bitsize; i += value_bit)
4967 *vp++ = 0;
4969 break;
4971 default:
4972 gcc_unreachable ();
4976 /* Now, pick the right byte to start with. */
4977 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4978 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4979 will already have offset 0. */
4980 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4982 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4983 - byte);
4984 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4985 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4986 byte = (subword_byte % UNITS_PER_WORD
4987 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4990 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4991 so if it's become negative it will instead be very large.) */
4992 gcc_assert (byte < GET_MODE_SIZE (innermode));
4994 /* Convert from bytes to chunks of size value_bit. */
4995 value_start = byte * (BITS_PER_UNIT / value_bit);
4997 /* Re-pack the value. */
4999 if (VECTOR_MODE_P (outermode))
5001 num_elem = GET_MODE_NUNITS (outermode);
5002 result_v = rtvec_alloc (num_elem);
5003 elems = &RTVEC_ELT (result_v, 0);
5004 outer_submode = GET_MODE_INNER (outermode);
5006 else
5008 num_elem = 1;
5009 elems = &result_s;
5010 outer_submode = outermode;
5013 outer_class = GET_MODE_CLASS (outer_submode);
5014 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5016 gcc_assert (elem_bitsize % value_bit == 0);
5017 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5019 for (elem = 0; elem < num_elem; elem++)
5021 unsigned char *vp;
5023 /* Vectors are stored in target memory order. (This is probably
5024 a mistake.) */
5026 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5027 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5028 / BITS_PER_UNIT);
5029 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5030 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5031 unsigned bytele = (subword_byte % UNITS_PER_WORD
5032 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5033 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5036 switch (outer_class)
5038 case MODE_INT:
5039 case MODE_PARTIAL_INT:
5041 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5043 for (i = 0;
5044 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5045 i += value_bit)
5046 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5047 for (; i < elem_bitsize; i += value_bit)
5048 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5049 << (i - HOST_BITS_PER_WIDE_INT));
5051 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5052 know why. */
5053 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5054 elems[elem] = gen_int_mode (lo, outer_submode);
5055 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5056 elems[elem] = immed_double_const (lo, hi, outer_submode);
5057 else
5058 return NULL_RTX;
5060 break;
5062 case MODE_FLOAT:
5063 case MODE_DECIMAL_FLOAT:
5065 REAL_VALUE_TYPE r;
5066 long tmp[max_bitsize / 32];
5068 /* real_from_target wants its input in words affected by
5069 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5070 and use WORDS_BIG_ENDIAN instead; see the documentation
5071 of SUBREG in rtl.texi. */
5072 for (i = 0; i < max_bitsize / 32; i++)
5073 tmp[i] = 0;
5074 for (i = 0; i < elem_bitsize; i += value_bit)
5076 int ibase;
5077 if (WORDS_BIG_ENDIAN)
5078 ibase = elem_bitsize - 1 - i;
5079 else
5080 ibase = i;
5081 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5084 real_from_target (&r, tmp, outer_submode);
5085 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5087 break;
5089 case MODE_FRACT:
5090 case MODE_UFRACT:
5091 case MODE_ACCUM:
5092 case MODE_UACCUM:
5094 FIXED_VALUE_TYPE f;
5095 f.data.low = 0;
5096 f.data.high = 0;
5097 f.mode = outer_submode;
5099 for (i = 0;
5100 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5101 i += value_bit)
5102 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5103 for (; i < elem_bitsize; i += value_bit)
5104 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5105 << (i - HOST_BITS_PER_WIDE_INT));
5107 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5109 break;
5111 default:
5112 gcc_unreachable ();
5115 if (VECTOR_MODE_P (outermode))
5116 return gen_rtx_CONST_VECTOR (outermode, result_v);
5117 else
5118 return result_s;
5121 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5122 Return 0 if no simplifications are possible. */
5124 simplify_subreg (enum machine_mode outermode, rtx op,
5125 enum machine_mode innermode, unsigned int byte)
5127 /* Little bit of sanity checking. */
5128 gcc_assert (innermode != VOIDmode);
5129 gcc_assert (outermode != VOIDmode);
5130 gcc_assert (innermode != BLKmode);
5131 gcc_assert (outermode != BLKmode);
5133 gcc_assert (GET_MODE (op) == innermode
5134 || GET_MODE (op) == VOIDmode);
5136 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5137 gcc_assert (byte < GET_MODE_SIZE (innermode));
5139 if (outermode == innermode && !byte)
5140 return op;
5142 if (CONST_INT_P (op)
5143 || GET_CODE (op) == CONST_DOUBLE
5144 || GET_CODE (op) == CONST_FIXED
5145 || GET_CODE (op) == CONST_VECTOR)
5146 return simplify_immed_subreg (outermode, op, innermode, byte);
5148 /* Changing mode twice with SUBREG => just change it once,
5149 or not at all if changing back to op's starting mode. */
5150 if (GET_CODE (op) == SUBREG)
5152 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5153 int final_offset = byte + SUBREG_BYTE (op);
5154 rtx newx;
5156 if (outermode == innermostmode
5157 && byte == 0 && SUBREG_BYTE (op) == 0)
5158 return SUBREG_REG (op);
5160 /* The SUBREG_BYTE represents the offset, as if the value were stored
5161 in memory. An irritating exception is a paradoxical subreg, where
5162 we define SUBREG_BYTE to be 0. On big-endian machines, this
5163 value should be negative. For a moment, undo this exception. */
5164 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5166 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5167 if (WORDS_BIG_ENDIAN)
5168 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5169 if (BYTES_BIG_ENDIAN)
5170 final_offset += difference % UNITS_PER_WORD;
5172 if (SUBREG_BYTE (op) == 0
5173 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5175 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5176 if (WORDS_BIG_ENDIAN)
5177 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5178 if (BYTES_BIG_ENDIAN)
5179 final_offset += difference % UNITS_PER_WORD;
5182 /* See whether resulting subreg will be paradoxical. */
5183 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5185 /* In nonparadoxical subregs we can't handle negative offsets. */
5186 if (final_offset < 0)
5187 return NULL_RTX;
5188 /* Bail out in case resulting subreg would be incorrect. */
5189 if (final_offset % GET_MODE_SIZE (outermode)
5190 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5191 return NULL_RTX;
5193 else
5195 int offset = 0;
5196 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5198 /* In a paradoxical subreg, see if we are still looking at the lower part.
5199 If so, our SUBREG_BYTE will be 0. */
5200 if (WORDS_BIG_ENDIAN)
5201 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5202 if (BYTES_BIG_ENDIAN)
5203 offset += difference % UNITS_PER_WORD;
5204 if (offset == final_offset)
5205 final_offset = 0;
5206 else
5207 return NULL_RTX;
5210 /* Recurse for further possible simplifications. */
5211 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5212 final_offset);
5213 if (newx)
5214 return newx;
5215 if (validate_subreg (outermode, innermostmode,
5216 SUBREG_REG (op), final_offset))
5218 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5219 if (SUBREG_PROMOTED_VAR_P (op)
5220 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5221 && GET_MODE_CLASS (outermode) == MODE_INT
5222 && IN_RANGE (GET_MODE_SIZE (outermode),
5223 GET_MODE_SIZE (innermode),
5224 GET_MODE_SIZE (innermostmode))
5225 && subreg_lowpart_p (newx))
5227 SUBREG_PROMOTED_VAR_P (newx) = 1;
5228 SUBREG_PROMOTED_UNSIGNED_SET
5229 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5231 return newx;
5233 return NULL_RTX;
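/* For example, on a little-endian target with 4-byte words,
   (subreg:HI (subreg:SI (reg:DI 100) 4) 0) is collapsed by the code
   above into the single (subreg:HI (reg:DI 100) 4) (pseudo register
   number chosen only for illustration). */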
5236 /* Merge implicit and explicit truncations. */
5238 if (GET_CODE (op) == TRUNCATE
5239 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5240 && subreg_lowpart_offset (outermode, innermode) == byte)
5241 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5242 GET_MODE (XEXP (op, 0)));
5244 /* SUBREG of a hard register => just change the register number
5245 and/or mode. If the hard register is not valid in that mode,
5246 suppress this simplification. If the hard register is the stack,
5247 frame, or argument pointer, leave this as a SUBREG. */
5249 if (REG_P (op) && HARD_REGISTER_P (op))
5251 unsigned int regno, final_regno;
5253 regno = REGNO (op);
5254 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5255 if (HARD_REGISTER_NUM_P (final_regno))
5257 rtx x;
5258 int final_offset = byte;
5260 /* Adjust offset for paradoxical subregs. */
5261 if (byte == 0
5262 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5264 int difference = (GET_MODE_SIZE (innermode)
5265 - GET_MODE_SIZE (outermode));
5266 if (WORDS_BIG_ENDIAN)
5267 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5268 if (BYTES_BIG_ENDIAN)
5269 final_offset += difference % UNITS_PER_WORD;
5272 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5274 /* Propagate original regno. We don't have any way to specify
5275 the offset inside original regno, so do so only for lowpart.
5276 The information is used only by alias analysis, which cannot
5277 grok a partial register anyway. */
5279 if (subreg_lowpart_offset (outermode, innermode) == byte)
5280 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5281 return x;
5285 /* If we have a SUBREG of a register that we are replacing and we are
5286 replacing it with a MEM, make a new MEM and try replacing the
5287 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5288 or if we would be widening it. */
5290 if (MEM_P (op)
5291 && ! mode_dependent_address_p (XEXP (op, 0))
5292 /* Allow splitting of volatile memory references in case we don't
5293 have an instruction to move the whole thing. */
5294 && (! MEM_VOLATILE_P (op)
5295 || ! have_insn_for (SET, innermode))
5296 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5297 return adjust_address_nv (op, outermode, byte);
5299 /* Handle complex values represented as CONCAT
5300 of real and imaginary part. */
5301 if (GET_CODE (op) == CONCAT)
5303 unsigned int part_size, final_offset;
5304 rtx part, res;
5306 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5307 if (byte < part_size)
5309 part = XEXP (op, 0);
5310 final_offset = byte;
5312 else
5314 part = XEXP (op, 1);
5315 final_offset = byte - part_size;
5318 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5319 return NULL_RTX;
5321 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5322 if (res)
5323 return res;
5324 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5325 return gen_rtx_SUBREG (outermode, part, final_offset);
5326 return NULL_RTX;
5329 /* Optimize SUBREG truncations of zero and sign extended values. */
5330 if ((GET_CODE (op) == ZERO_EXTEND
5331 || GET_CODE (op) == SIGN_EXTEND)
5332 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5334 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5336 /* If we're requesting the lowpart of a zero or sign extension,
5337 there are three possibilities. If the outermode is the same
5338 as the origmode, we can omit both the extension and the subreg.
5339 If the outermode is not larger than the origmode, we can apply
5340 the truncation without the extension. Finally, if the outermode
5341 is larger than the origmode, but both are integer modes, we
5342 can just extend to the appropriate mode. */
5343 if (bitpos == 0)
5345 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5346 if (outermode == origmode)
5347 return XEXP (op, 0);
5348 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5349 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5350 subreg_lowpart_offset (outermode,
5351 origmode));
5352 if (SCALAR_INT_MODE_P (outermode))
5353 return simplify_gen_unary (GET_CODE (op), outermode,
5354 XEXP (op, 0), origmode);
5357 /* A SUBREG resulting from a zero extension may fold to zero if
5358 it extracts higher bits than the ZERO_EXTEND's source bits. */
5359 if (GET_CODE (op) == ZERO_EXTEND
5360 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5361 return CONST0_RTX (outermode);
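/* Two examples of the above, assuming the subreg selects the low part
   (byte 0 on a little-endian target):
   (subreg:HI (zero_extend:SI (reg:HI 100)) 0) folds back to (reg:HI 100),
   and (subreg:SI (zero_extend:DI (reg:HI 100)) 0) folds to the narrower
   extension (zero_extend:SI (reg:HI 100)). */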
5364 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5365 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5366 the outer subreg is effectively a truncation to the original mode. */
5367 if ((GET_CODE (op) == LSHIFTRT
5368 || GET_CODE (op) == ASHIFTRT)
5369 && SCALAR_INT_MODE_P (outermode)
5370 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5371 to avoid the possibility that an outer LSHIFTRT shifts by more
5372 than the sign extension's sign_bit_copies and introduces zeros
5373 into the high bits of the result. */
5374 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5375 && CONST_INT_P (XEXP (op, 1))
5376 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5377 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5378 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5379 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5380 return simplify_gen_binary (ASHIFTRT, outermode,
5381 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5383 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5384 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5385 the outer subreg is effectively a truncation to the original mode. */
5386 if ((GET_CODE (op) == LSHIFTRT
5387 || GET_CODE (op) == ASHIFTRT)
5388 && SCALAR_INT_MODE_P (outermode)
5389 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5390 && CONST_INT_P (XEXP (op, 1))
5391 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5392 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5393 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5394 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5395 return simplify_gen_binary (LSHIFTRT, outermode,
5396 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5398 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5399 (ashift:QI (x:QI) C), where C is a suitable small constant and
5400 the outer subreg is effectively a truncation to the original mode. */
5401 if (GET_CODE (op) == ASHIFT
5402 && SCALAR_INT_MODE_P (outermode)
5403 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5404 && CONST_INT_P (XEXP (op, 1))
5405 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5406 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5407 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5408 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5409 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5410 return simplify_gen_binary (ASHIFT, outermode,
5411 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5413 /* Recognize a word extraction from a multi-word subreg. */
5414 if ((GET_CODE (op) == LSHIFTRT
5415 || GET_CODE (op) == ASHIFTRT)
5416 && SCALAR_INT_MODE_P (outermode)
5417 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5418 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5419 && CONST_INT_P (XEXP (op, 1))
5420 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5421 && INTVAL (XEXP (op, 1)) >= 0
5422 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5423 && byte == subreg_lowpart_offset (outermode, innermode))
5425 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5426 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5427 (WORDS_BIG_ENDIAN
5428 ? byte - shifted_bytes
5429 : byte + shifted_bytes));
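/* For instance, on a little-endian target with 32-bit words,
   (subreg:SI (lshiftrt:DI (reg:DI 100) (const_int 32)) 0) is recognized
   above as extracting the high word and becomes
   (subreg:SI (reg:DI 100) 4) (register number chosen for illustration). */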
5432 return NULL_RTX;
5435 /* Make a SUBREG operation or equivalent if it folds. */
5438 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5439 enum machine_mode innermode, unsigned int byte)
5441 rtx newx;
5443 newx = simplify_subreg (outermode, op, innermode, byte);
5444 if (newx)
5445 return newx;
5447 if (GET_CODE (op) == SUBREG
5448 || GET_CODE (op) == CONCAT
5449 || GET_MODE (op) == VOIDmode)
5450 return NULL_RTX;
5452 if (validate_subreg (outermode, innermode, op, byte))
5453 return gen_rtx_SUBREG (outermode, op, byte);
5455 return NULL_RTX;
5458 /* Simplify X, an rtx expression.
5460 Return the simplified expression or NULL if no simplifications
5461 were possible.
5463 This is the preferred entry point into the simplification routines;
5464 however, we still allow passes to call the more specific routines.
5466 Right now GCC has three (yes, three) major bodies of RTL simplification
5467 code that need to be unified.
5469 1. fold_rtx in cse.c. This code uses various CSE specific
5470 information to aid in RTL simplification.
5472 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5473 it uses combine specific information to aid in RTL
5474 simplification.
5476 3. The routines in this file.
5479 Long term we want to only have one body of simplification code; to
5480 get to that state I recommend the following steps:
5482 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5483 which are not pass dependent state into these routines.
5485 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5486 use this routine whenever possible.
5488 3. Allow for pass dependent state to be provided to these
5489 routines and add simplifications based on the pass dependent
5490 state. Remove code from cse.c & combine.c that becomes
5491 redundant/dead.
5493 It will take time, but ultimately the compiler will be easier to
5494 maintain and improve. It's totally silly that when we add a
5495 simplification it needs to be added to 4 places (3 for RTL
5496 simplification and 1 for tree simplification). */
5499 simplify_rtx (const_rtx x)
5501 const enum rtx_code code = GET_CODE (x);
5502 const enum machine_mode mode = GET_MODE (x);
5504 switch (GET_RTX_CLASS (code))
5506 case RTX_UNARY:
5507 return simplify_unary_operation (code, mode,
5508 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5509 case RTX_COMM_ARITH:
5510 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5511 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5513 /* Fall through.... */
5515 case RTX_BIN_ARITH:
5516 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5518 case RTX_TERNARY:
5519 case RTX_BITFIELD_OPS:
5520 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5521 XEXP (x, 0), XEXP (x, 1),
5522 XEXP (x, 2));
5524 case RTX_COMPARE:
5525 case RTX_COMM_COMPARE:
5526 return simplify_relational_operation (code, mode,
5527 ((GET_MODE (XEXP (x, 0))
5528 != VOIDmode)
5529 ? GET_MODE (XEXP (x, 0))
5530 : GET_MODE (XEXP (x, 1))),
5531 XEXP (x, 0),
5532 XEXP (x, 1));
5534 case RTX_EXTRA:
5535 if (code == SUBREG)
5536 return simplify_subreg (mode, SUBREG_REG (x),
5537 GET_MODE (SUBREG_REG (x)),
5538 SUBREG_BYTE (x));
5539 break;
5541 case RTX_OBJ:
5542 if (code == LO_SUM)
5544 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5545 if (GET_CODE (XEXP (x, 0)) == HIGH
5546 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5547 return XEXP (x, 1);
5549 break;
5551 default:
5552 break;
5554 return NULL;