1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
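/* Illustration (added, not from the original source): with a 64-bit
   HOST_WIDE_INT on the usual two's-complement host, HWI_SIGN_EXTEND of a
   value whose top bit is set (e.g. 0x8000000000000000) yields -1 (all
   ones), while HWI_SIGN_EXTEND (5) yields 0 -- i.e. the macro replicates
   LOW's sign bit across an entire word to form the matching HIGH.  */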
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
70 return gen_int_mode (- INTVAL (i), mode);
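/* For instance (illustrative only): in QImode the most negative value is
   (const_int -128); negating it gives 128, which does not fit in the mode,
   and gen_int_mode truncates it back, yielding (const_int -128) again.  */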
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
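/* Example (added for clarity): for SImode this is true exactly when X is
   the constant whose only set bit within the mode is bit 31, i.e. the
   value 0x80000000; constants wider than a host word are handled by the
   CONST_DOUBLE branch above.  */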
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
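/* Usage sketch (illustrative only): simplify_gen_binary (PLUS, SImode,
   const2_rtx, x) folds nothing but is canonicalized to
   (plus x (const_int 2)); with two constant operands the folded constant
   itself is returned instead of a new PLUS.  */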
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
132 avoid_constant_pool_reference (rtx x)
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
140 case MEM:
141 break;
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 REAL_VALUE_TYPE d;
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 return x;
156 default:
157 return x;
160 if (GET_MODE (x) == BLKmode)
161 return x;
163 addr = XEXP (x, 0);
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
197 else
198 return c;
201 return x;
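/* Illustration (added, not part of the original comments): a
   (mem (symbol_ref ...)) that addresses a constant-pool entry holding,
   say, the DFmode value 1.0 is replaced by that CONST_DOUBLE directly
   (possibly via simplify_subreg when the access mode or offset differs),
   so later folding can see the value; any other rtx is returned
   unchanged.  */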
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
209 delegitimize_mem_from_attrs (rtx x)
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
213 if (MEM_P (x)
214 && MEM_EXPR (x)
215 && MEM_OFFSET (x))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
272 if (MEM_P (newx))
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
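/* Worked case (illustrative): if X's address O is (plus Y (const_int 12)),
   NEWX's address N is (plus Y (const_int 8)) and OFFSET is 4, then
   INTVAL (XEXP (n, 1)) + offset equals INTVAL (XEXP (o, 1)) and the bases
   match, so X is left untouched rather than rebuilt with
   adjust_address_nv.  */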
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
301 return x;
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
320 /* Likewise for ternary operations. */
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X, if it returns non-NULL, replace X with its return value and simplify the
355 result. */
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420 case RTX_EXTRA:
421 if (code == SUBREG)
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
431 break;
433 case RTX_OBJ:
434 if (code == MEM)
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
454 break;
456 default:
457 break;
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
474 if (newvec == vec)
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
484 break;
486 case 'e':
487 if (XEXP (x, i))
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
497 break;
499 return newx;
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
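/* Usage sketch (illustrative only; PSEUDO stands for any rtx operand):
   simplify_replace_rtx (x, pseudo, const0_rtx) returns a copy of X in
   which every occurrence of PSEUDO has been replaced by (const_int 0)
   and the result re-simplified, so e.g. (plus pseudo (const_int 4))
   would come back as (const_int 4).  */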
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
595 && CONST_INT_P (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
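/* For example: (not (ior A B)) becomes (and (not A) (not B)) and
   (not (and A B)) becomes (ior (not A) (not B)); if only one operand
   ends up negated after folding, it is placed first.  */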
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult (neg A) B).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
695 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
759 break;
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen. */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
896 break;
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
923 break;
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
933 case POPCOUNT:
934 switch (GET_CODE (op))
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
950 default:
951 break;
953 break;
955 case PARITY:
956 switch (GET_CODE (op))
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
973 default:
974 break;
976 break;
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Check for a sign extension of a subreg of a promoted
1004 variable, where the promotion is sign-extended, and the
1005 target mode is the same as the variable's promotion. */
1006 if (GET_CODE (op) == SUBREG
1007 && SUBREG_PROMOTED_VAR_P (op)
1008 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1009 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1010 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1012 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1013 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1014 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1016 gcc_assert (GET_MODE_BITSIZE (mode)
1017 > GET_MODE_BITSIZE (GET_MODE (op)));
1018 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1019 GET_MODE (XEXP (op, 0)));
1022 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1023 is (sign_extend:M (subreg:O <X>)) if there is mode with
1024 GET_MODE_BITSIZE (N) - I bits.
1025 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1026 is similarly (zero_extend:M (subreg:O <X>)). */
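/* Concrete case (illustrative): with N = SImode and I = 24, mode O has
   32 - 24 = 8 bits, i.e. QImode, so
   (sign_extend:DI (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)))
   becomes (sign_extend:DI <low QImode part of X>), and the LSHIFTRT form
   becomes the corresponding ZERO_EXTEND.  */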
1027 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1028 && GET_CODE (XEXP (op, 0)) == ASHIFT
1029 && CONST_INT_P (XEXP (op, 1))
1030 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1031 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1033 enum machine_mode tmode
1034 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1035 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1036 gcc_assert (GET_MODE_BITSIZE (mode)
1037 > GET_MODE_BITSIZE (GET_MODE (op)));
1038 if (tmode != BLKmode)
1040 rtx inner =
1041 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1042 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1043 ? SIGN_EXTEND : ZERO_EXTEND,
1044 mode, inner, tmode);
1048 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1049 /* As we do not know which address space the pointer is referring to,
1050 we can do this only if the target does not support different pointer
1051 or address modes depending on the address space. */
1052 if (target_default_pointer_address_modes_p ()
1053 && ! POINTERS_EXTEND_UNSIGNED
1054 && mode == Pmode && GET_MODE (op) == ptr_mode
1055 && (CONSTANT_P (op)
1056 || (GET_CODE (op) == SUBREG
1057 && REG_P (SUBREG_REG (op))
1058 && REG_POINTER (SUBREG_REG (op))
1059 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1060 return convert_memory_address (Pmode, op);
1061 #endif
1062 break;
1064 case ZERO_EXTEND:
1065 /* Check for a zero extension of a subreg of a promoted
1066 variable, where the promotion is zero-extended, and the
1067 target mode is the same as the variable's promotion. */
1068 if (GET_CODE (op) == SUBREG
1069 && SUBREG_PROMOTED_VAR_P (op)
1070 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1071 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1072 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1075 if (GET_CODE (op) == ZERO_EXTEND)
1076 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1077 GET_MODE (XEXP (op, 0)));
1079 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1080 is (zero_extend:M (subreg:O <X>)) if there is mode with
1081 GET_MODE_BITSIZE (N) - I bits. */
1082 if (GET_CODE (op) == LSHIFTRT
1083 && GET_CODE (XEXP (op, 0)) == ASHIFT
1084 && CONST_INT_P (XEXP (op, 1))
1085 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1086 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1088 enum machine_mode tmode
1089 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1090 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1091 if (tmode != BLKmode)
1093 rtx inner =
1094 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1095 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1099 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1100 /* As we do not know which address space the pointer is referring to,
1101 we can do this only if the target does not support different pointer
1102 or address modes depending on the address space. */
1103 if (target_default_pointer_address_modes_p ()
1104 && POINTERS_EXTEND_UNSIGNED > 0
1105 && mode == Pmode && GET_MODE (op) == ptr_mode
1106 && (CONSTANT_P (op)
1107 || (GET_CODE (op) == SUBREG
1108 && REG_P (SUBREG_REG (op))
1109 && REG_POINTER (SUBREG_REG (op))
1110 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1111 return convert_memory_address (Pmode, op);
1112 #endif
1113 break;
1115 default:
1116 break;
1119 return 0;
1122 /* Try to compute the value of a unary operation CODE whose output mode is to
1123 be MODE with input operand OP whose mode was originally OP_MODE.
1124 Return zero if the value cannot be computed. */
1126 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1127 rtx op, enum machine_mode op_mode)
1129 unsigned int width = GET_MODE_BITSIZE (mode);
1131 if (code == VEC_DUPLICATE)
1133 gcc_assert (VECTOR_MODE_P (mode));
1134 if (GET_MODE (op) != VOIDmode)
1136 if (!VECTOR_MODE_P (GET_MODE (op)))
1137 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1138 else
1139 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1140 (GET_MODE (op)));
1142 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1143 || GET_CODE (op) == CONST_VECTOR)
1145 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1146 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1147 rtvec v = rtvec_alloc (n_elts);
1148 unsigned int i;
1150 if (GET_CODE (op) != CONST_VECTOR)
1151 for (i = 0; i < n_elts; i++)
1152 RTVEC_ELT (v, i) = op;
1153 else
1155 enum machine_mode inmode = GET_MODE (op);
1156 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1157 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1159 gcc_assert (in_n_elts < n_elts);
1160 gcc_assert ((n_elts % in_n_elts) == 0);
1161 for (i = 0; i < n_elts; i++)
1162 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1164 return gen_rtx_CONST_VECTOR (mode, v);
1168 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1170 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1171 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1172 enum machine_mode opmode = GET_MODE (op);
1173 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1174 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1175 rtvec v = rtvec_alloc (n_elts);
1176 unsigned int i;
1178 gcc_assert (op_n_elts == n_elts);
1179 for (i = 0; i < n_elts; i++)
1181 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1182 CONST_VECTOR_ELT (op, i),
1183 GET_MODE_INNER (opmode));
1184 if (!x)
1185 return 0;
1186 RTVEC_ELT (v, i) = x;
1188 return gen_rtx_CONST_VECTOR (mode, v);
1191 /* The order of these tests is critical so that, for example, we don't
1192 check the wrong mode (input vs. output) for a conversion operation,
1193 such as FIX. At some point, this should be simplified. */
1195 if (code == FLOAT && GET_MODE (op) == VOIDmode
1196 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1198 HOST_WIDE_INT hv, lv;
1199 REAL_VALUE_TYPE d;
1201 if (CONST_INT_P (op))
1202 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1203 else
1204 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1206 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1207 d = real_value_truncate (mode, d);
1208 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1210 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1211 && (GET_CODE (op) == CONST_DOUBLE
1212 || CONST_INT_P (op)))
1214 HOST_WIDE_INT hv, lv;
1215 REAL_VALUE_TYPE d;
1217 if (CONST_INT_P (op))
1218 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1219 else
1220 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1222 if (op_mode == VOIDmode)
1224 /* We don't know how to interpret negative-looking numbers in
1225 this case, so don't try to fold those. */
1226 if (hv < 0)
1227 return 0;
1229 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1231 else
1232 hv = 0, lv &= GET_MODE_MASK (op_mode);
1234 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1235 d = real_value_truncate (mode, d);
1236 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1239 if (CONST_INT_P (op)
1240 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1242 HOST_WIDE_INT arg0 = INTVAL (op);
1243 HOST_WIDE_INT val;
1245 switch (code)
1247 case NOT:
1248 val = ~ arg0;
1249 break;
1251 case NEG:
1252 val = - arg0;
1253 break;
1255 case ABS:
1256 val = (arg0 >= 0 ? arg0 : - arg0);
1257 break;
1259 case FFS:
1260 arg0 &= GET_MODE_MASK (mode);
1261 val = ffs_hwi (arg0);
1262 break;
1264 case CLZ:
1265 arg0 &= GET_MODE_MASK (mode);
1266 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1268 else
1269 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1270 break;
1272 case CTZ:
1273 arg0 &= GET_MODE_MASK (mode);
1274 if (arg0 == 0)
1276 /* Even if the value at zero is undefined, we have to come
1277 up with some replacement. Seems good enough. */
1278 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1279 val = GET_MODE_BITSIZE (mode);
1281 else
1282 val = ctz_hwi (arg0);
1283 break;
1285 case POPCOUNT:
1286 arg0 &= GET_MODE_MASK (mode);
1287 val = 0;
1288 while (arg0)
1289 val++, arg0 &= arg0 - 1;
1290 break;
1292 case PARITY:
1293 arg0 &= GET_MODE_MASK (mode);
1294 val = 0;
1295 while (arg0)
1296 val++, arg0 &= arg0 - 1;
1297 val &= 1;
1298 break;
1300 case BSWAP:
1302 unsigned int s;
1304 val = 0;
1305 for (s = 0; s < width; s += 8)
1307 unsigned int d = width - s - 8;
1308 unsigned HOST_WIDE_INT byte;
1309 byte = (arg0 >> s) & 0xff;
1310 val |= byte << d;
1313 break;
1315 case TRUNCATE:
1316 val = arg0;
1317 break;
1319 case ZERO_EXTEND:
1320 /* When zero-extending a CONST_INT, we need to know its
1321 original mode. */
1322 gcc_assert (op_mode != VOIDmode);
1323 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1325 /* If we were really extending the mode,
1326 we would have to distinguish between zero-extension
1327 and sign-extension. */
1328 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1329 val = arg0;
1331 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1332 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1333 << GET_MODE_BITSIZE (op_mode));
1334 else
1335 return 0;
1336 break;
1338 case SIGN_EXTEND:
1339 if (op_mode == VOIDmode)
1340 op_mode = mode;
1341 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1343 /* If we were really extending the mode,
1344 we would have to distinguish between zero-extension
1345 and sign-extension. */
1346 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1347 val = arg0;
1349 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1352 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1353 << GET_MODE_BITSIZE (op_mode));
1354 if (val & ((unsigned HOST_WIDE_INT) 1
1355 << (GET_MODE_BITSIZE (op_mode) - 1)))
1357 val -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1359 else
1360 return 0;
1361 break;
1363 case SQRT:
1364 case FLOAT_EXTEND:
1365 case FLOAT_TRUNCATE:
1366 case SS_TRUNCATE:
1367 case US_TRUNCATE:
1368 case SS_NEG:
1369 case US_NEG:
1370 case SS_ABS:
1371 return 0;
1373 default:
1374 gcc_unreachable ();
1377 return gen_int_mode (val, mode);
1380 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1381 for a DImode operation on a CONST_INT. */
1382 else if (GET_MODE (op) == VOIDmode
1383 && width <= HOST_BITS_PER_WIDE_INT * 2
1384 && (GET_CODE (op) == CONST_DOUBLE
1385 || CONST_INT_P (op)))
1387 unsigned HOST_WIDE_INT l1, lv;
1388 HOST_WIDE_INT h1, hv;
1390 if (GET_CODE (op) == CONST_DOUBLE)
1391 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1392 else
1393 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1395 switch (code)
1397 case NOT:
1398 lv = ~ l1;
1399 hv = ~ h1;
1400 break;
1402 case NEG:
1403 neg_double (l1, h1, &lv, &hv);
1404 break;
1406 case ABS:
1407 if (h1 < 0)
1408 neg_double (l1, h1, &lv, &hv);
1409 else
1410 lv = l1, hv = h1;
1411 break;
1413 case FFS:
1414 hv = 0;
1415 if (l1 != 0)
1416 lv = ffs_hwi (l1);
1417 else if (h1 != 0)
1418 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1419 else
1420 lv = 0;
1421 break;
1423 case CLZ:
1424 hv = 0;
1425 if (h1 != 0)
1426 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1427 - HOST_BITS_PER_WIDE_INT;
1428 else if (l1 != 0)
1429 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1430 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1431 lv = GET_MODE_BITSIZE (mode);
1432 break;
1434 case CTZ:
1435 hv = 0;
1436 if (l1 != 0)
1437 lv = ctz_hwi (l1);
1438 else if (h1 != 0)
1439 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1440 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1441 lv = GET_MODE_BITSIZE (mode);
1442 break;
1444 case POPCOUNT:
1445 hv = 0;
1446 lv = 0;
1447 while (l1)
1448 lv++, l1 &= l1 - 1;
1449 while (h1)
1450 lv++, h1 &= h1 - 1;
1451 break;
1453 case PARITY:
1454 hv = 0;
1455 lv = 0;
1456 while (l1)
1457 lv++, l1 &= l1 - 1;
1458 while (h1)
1459 lv++, h1 &= h1 - 1;
1460 lv &= 1;
1461 break;
1463 case BSWAP:
1465 unsigned int s;
1467 hv = 0;
1468 lv = 0;
1469 for (s = 0; s < width; s += 8)
1471 unsigned int d = width - s - 8;
1472 unsigned HOST_WIDE_INT byte;
1474 if (s < HOST_BITS_PER_WIDE_INT)
1475 byte = (l1 >> s) & 0xff;
1476 else
1477 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1479 if (d < HOST_BITS_PER_WIDE_INT)
1480 lv |= byte << d;
1481 else
1482 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1485 break;
1487 case TRUNCATE:
1488 /* This is just a change-of-mode, so do nothing. */
1489 lv = l1, hv = h1;
1490 break;
1492 case ZERO_EXTEND:
1493 gcc_assert (op_mode != VOIDmode);
1495 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1496 return 0;
1498 hv = 0;
1499 lv = l1 & GET_MODE_MASK (op_mode);
1500 break;
1502 case SIGN_EXTEND:
1503 if (op_mode == VOIDmode
1504 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1505 return 0;
1506 else
1508 lv = l1 & GET_MODE_MASK (op_mode);
1509 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1510 && (lv & ((unsigned HOST_WIDE_INT) 1
1511 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1512 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1514 hv = HWI_SIGN_EXTEND (lv);
1516 break;
1518 case SQRT:
1519 return 0;
1521 default:
1522 return 0;
1525 return immed_double_const (lv, hv, mode);
1528 else if (GET_CODE (op) == CONST_DOUBLE
1529 && SCALAR_FLOAT_MODE_P (mode)
1530 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1532 REAL_VALUE_TYPE d, t;
1533 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1535 switch (code)
1537 case SQRT:
1538 if (HONOR_SNANS (mode) && real_isnan (&d))
1539 return 0;
1540 real_sqrt (&t, mode, &d);
1541 d = t;
1542 break;
1543 case ABS:
1544 d = real_value_abs (&d);
1545 break;
1546 case NEG:
1547 d = real_value_negate (&d);
1548 break;
1549 case FLOAT_TRUNCATE:
1550 d = real_value_truncate (mode, d);
1551 break;
1552 case FLOAT_EXTEND:
1553 /* All this does is change the mode, unless changing
1554 mode class. */
1555 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1556 real_convert (&d, mode, &d);
1557 break;
1558 case FIX:
1559 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1560 break;
1561 case NOT:
1563 long tmp[4];
1564 int i;
1566 real_to_target (tmp, &d, GET_MODE (op));
1567 for (i = 0; i < 4; i++)
1568 tmp[i] = ~tmp[i];
1569 real_from_target (&d, tmp, mode);
1570 break;
1572 default:
1573 gcc_unreachable ();
1575 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1578 else if (GET_CODE (op) == CONST_DOUBLE
1579 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1580 && GET_MODE_CLASS (mode) == MODE_INT
1581 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1583 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1584 operators are intentionally left unspecified (to ease implementation
1585 by target backends), for consistency, this routine implements the
1586 same semantics for constant folding as used by the middle-end. */
1588 /* This was formerly used only for non-IEEE float.
1589 eggert@twinsun.com says it is safe for IEEE also. */
1590 HOST_WIDE_INT xh, xl, th, tl;
1591 REAL_VALUE_TYPE x, t;
1592 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1593 switch (code)
1595 case FIX:
1596 if (REAL_VALUE_ISNAN (x))
1597 return const0_rtx;
1599 /* Test against the signed upper bound. */
1600 if (width > HOST_BITS_PER_WIDE_INT)
1602 th = ((unsigned HOST_WIDE_INT) 1
1603 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1604 tl = -1;
1606 else
1608 th = 0;
1609 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1611 real_from_integer (&t, VOIDmode, tl, th, 0);
1612 if (REAL_VALUES_LESS (t, x))
1614 xh = th;
1615 xl = tl;
1616 break;
1619 /* Test against the signed lower bound. */
1620 if (width > HOST_BITS_PER_WIDE_INT)
1622 th = (unsigned HOST_WIDE_INT) (-1)
1623 << (width - HOST_BITS_PER_WIDE_INT - 1);
1624 tl = 0;
1626 else
1628 th = -1;
1629 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1631 real_from_integer (&t, VOIDmode, tl, th, 0);
1632 if (REAL_VALUES_LESS (x, t))
1634 xh = th;
1635 xl = tl;
1636 break;
1638 REAL_VALUE_TO_INT (&xl, &xh, x);
1639 break;
1641 case UNSIGNED_FIX:
1642 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1643 return const0_rtx;
1645 /* Test against the unsigned upper bound. */
1646 if (width == 2*HOST_BITS_PER_WIDE_INT)
1648 th = -1;
1649 tl = -1;
1651 else if (width >= HOST_BITS_PER_WIDE_INT)
1653 th = ((unsigned HOST_WIDE_INT) 1
1654 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1655 tl = -1;
1657 else
1659 th = 0;
1660 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1662 real_from_integer (&t, VOIDmode, tl, th, 1);
1663 if (REAL_VALUES_LESS (t, x))
1665 xh = th;
1666 xl = tl;
1667 break;
1670 REAL_VALUE_TO_INT (&xl, &xh, x);
1671 break;
1673 default:
1674 gcc_unreachable ();
1676 return immed_double_const (xl, xh, mode);
1679 return NULL_RTX;
1682 /* Subroutine of simplify_binary_operation to simplify a commutative,
1683 associative binary operation CODE with result mode MODE, operating
1684 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1685 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1686 canonicalization is possible. */
1688 static rtx
1689 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1690 rtx op0, rtx op1)
1692 rtx tem;
1694 /* Linearize the operator to the left. */
1695 if (GET_CODE (op1) == code)
1697 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1698 if (GET_CODE (op0) == code)
1700 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1701 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1704 /* "a op (b op c)" becomes "(b op c) op a". */
1705 if (! swap_commutative_operands_p (op1, op0))
1706 return simplify_gen_binary (code, mode, op1, op0);
1708 tem = op0;
1709 op0 = op1;
1710 op1 = tem;
1713 if (GET_CODE (op0) == code)
1715 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1716 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1718 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1719 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1722 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1723 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1724 if (tem != 0)
1725 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1727 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1728 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1729 if (tem != 0)
1730 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1733 return 0;
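/* Worked example (added for clarity): (plus (plus x (const_int 3))
   (plus y (const_int 4))) is first linearized to the left, the constants
   are pushed to the end, and the constant subexpression folds, ending up
   as (plus (plus x y) (const_int 7)).  */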
1737 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1738 and OP1. Return 0 if no simplification is possible.
1740 Don't use this for relational operations such as EQ or LT.
1741 Use simplify_relational_operation instead. */
1743 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1744 rtx op0, rtx op1)
1746 rtx trueop0, trueop1;
1747 rtx tem;
1749 /* Relational operations don't work here. We must know the mode
1750 of the operands in order to do the comparison correctly.
1751 Assuming a full word can give incorrect results.
1752 Consider comparing 128 with -128 in QImode. */
1753 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1754 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1756 /* Make sure the constant is second. */
1757 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1758 && swap_commutative_operands_p (op0, op1))
1760 tem = op0, op0 = op1, op1 = tem;
1763 trueop0 = avoid_constant_pool_reference (op0);
1764 trueop1 = avoid_constant_pool_reference (op1);
1766 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1767 if (tem)
1768 return tem;
1769 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1772 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1773 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1774 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1775 actual constants. */
1777 static rtx
1778 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1779 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1781 rtx tem, reversed, opleft, opright;
1782 HOST_WIDE_INT val;
1783 unsigned int width = GET_MODE_BITSIZE (mode);
1785 /* Even if we can't compute a constant result,
1786 there are some cases worth simplifying. */
1788 switch (code)
1790 case PLUS:
1791 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1792 when x is NaN, infinite, or finite and nonzero. They aren't
1793 when x is -0 and the rounding mode is not towards -infinity,
1794 since (-0) + 0 is then 0. */
1795 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1796 return op0;
1798 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1799 transformations are safe even for IEEE. */
1800 if (GET_CODE (op0) == NEG)
1801 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1802 else if (GET_CODE (op1) == NEG)
1803 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1805 /* (~a) + 1 -> -a */
1806 if (INTEGRAL_MODE_P (mode)
1807 && GET_CODE (op0) == NOT
1808 && trueop1 == const1_rtx)
1809 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1811 /* Handle both-operands-constant cases. We can only add
1812 CONST_INTs to constants since the sum of relocatable symbols
1813 can't be handled by most assemblers. Don't add CONST_INT
1814 to CONST_INT since overflow won't be computed properly if wider
1815 than HOST_BITS_PER_WIDE_INT. */
1817 if ((GET_CODE (op0) == CONST
1818 || GET_CODE (op0) == SYMBOL_REF
1819 || GET_CODE (op0) == LABEL_REF)
1820 && CONST_INT_P (op1))
1821 return plus_constant (op0, INTVAL (op1));
1822 else if ((GET_CODE (op1) == CONST
1823 || GET_CODE (op1) == SYMBOL_REF
1824 || GET_CODE (op1) == LABEL_REF)
1825 && CONST_INT_P (op0))
1826 return plus_constant (op1, INTVAL (op0));
1828 /* See if this is something like X * C - X or vice versa or
1829 if the multiplication is written as a shift. If so, we can
1830 distribute and make a new multiply, shift, or maybe just
1831 have X (if C is 2 in the example above). But don't make
1832 something more expensive than we had before. */
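/* For instance (illustrative): (plus (mult X (const_int 3)) X) can fold
   to (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X) to
   (mult X (const_int 5)), but only when rtx_cost says the replacement is
   no more expensive than the original.  */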
1834 if (SCALAR_INT_MODE_P (mode))
1836 double_int coeff0, coeff1;
1837 rtx lhs = op0, rhs = op1;
1839 coeff0 = double_int_one;
1840 coeff1 = double_int_one;
1842 if (GET_CODE (lhs) == NEG)
1844 coeff0 = double_int_minus_one;
1845 lhs = XEXP (lhs, 0);
1847 else if (GET_CODE (lhs) == MULT
1848 && CONST_INT_P (XEXP (lhs, 1)))
1850 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1851 lhs = XEXP (lhs, 0);
1853 else if (GET_CODE (lhs) == ASHIFT
1854 && CONST_INT_P (XEXP (lhs, 1))
1855 && INTVAL (XEXP (lhs, 1)) >= 0
1856 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1858 coeff0 = double_int_setbit (double_int_zero,
1859 INTVAL (XEXP (lhs, 1)));
1860 lhs = XEXP (lhs, 0);
1863 if (GET_CODE (rhs) == NEG)
1865 coeff1 = double_int_minus_one;
1866 rhs = XEXP (rhs, 0);
1868 else if (GET_CODE (rhs) == MULT
1869 && CONST_INT_P (XEXP (rhs, 1)))
1871 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1872 rhs = XEXP (rhs, 0);
1874 else if (GET_CODE (rhs) == ASHIFT
1875 && CONST_INT_P (XEXP (rhs, 1))
1876 && INTVAL (XEXP (rhs, 1)) >= 0
1877 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1879 coeff1 = double_int_setbit (double_int_zero,
1880 INTVAL (XEXP (rhs, 1)));
1881 rhs = XEXP (rhs, 0);
1884 if (rtx_equal_p (lhs, rhs))
1886 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1887 rtx coeff;
1888 double_int val;
1889 bool speed = optimize_function_for_speed_p (cfun);
1891 val = double_int_add (coeff0, coeff1);
1892 coeff = immed_double_int_const (val, mode);
1894 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1895 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1896 ? tem : 0;
1900 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1901 if ((CONST_INT_P (op1)
1902 || GET_CODE (op1) == CONST_DOUBLE)
1903 && GET_CODE (op0) == XOR
1904 && (CONST_INT_P (XEXP (op0, 1))
1905 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1906 && mode_signbit_p (mode, op1))
1907 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1908 simplify_gen_binary (XOR, mode, op1,
1909 XEXP (op0, 1)));
1911 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1912 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1913 && GET_CODE (op0) == MULT
1914 && GET_CODE (XEXP (op0, 0)) == NEG)
1916 rtx in1, in2;
1918 in1 = XEXP (XEXP (op0, 0), 0);
1919 in2 = XEXP (op0, 1);
1920 return simplify_gen_binary (MINUS, mode, op1,
1921 simplify_gen_binary (MULT, mode,
1922 in1, in2));
1925 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1926 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1927 is 1. */
1928 if (COMPARISON_P (op0)
1929 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1930 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1931 && (reversed = reversed_comparison (op0, mode)))
1932 return
1933 simplify_gen_unary (NEG, mode, reversed, mode);
1935 /* If one of the operands is a PLUS or a MINUS, see if we can
1936 simplify this by the associative law.
1937 Don't use the associative law for floating point.
1938 The inaccuracy makes it nonassociative,
1939 and subtle programs can break if operations are associated. */
1941 if (INTEGRAL_MODE_P (mode)
1942 && (plus_minus_operand_p (op0)
1943 || plus_minus_operand_p (op1))
1944 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1945 return tem;
1947 /* Reassociate floating point addition only when the user
1948 specifies associative math operations. */
1949 if (FLOAT_MODE_P (mode)
1950 && flag_associative_math)
1952 tem = simplify_associative_operation (code, mode, op0, op1);
1953 if (tem)
1954 return tem;
1956 break;
1958 case COMPARE:
1959 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1960 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1961 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1962 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1964 rtx xop00 = XEXP (op0, 0);
1965 rtx xop10 = XEXP (op1, 0);
1967 #ifdef HAVE_cc0
1968 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1969 #else
1970 if (REG_P (xop00) && REG_P (xop10)
1971 && GET_MODE (xop00) == GET_MODE (xop10)
1972 && REGNO (xop00) == REGNO (xop10)
1973 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1974 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1975 #endif
1976 return xop00;
1978 break;
1980 case MINUS:
1981 /* We can't assume x-x is 0 even with non-IEEE floating point,
1982 but since it is zero except in very strange circumstances, we
1983 will treat it as zero with -ffinite-math-only. */
1984 if (rtx_equal_p (trueop0, trueop1)
1985 && ! side_effects_p (op0)
1986 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1987 return CONST0_RTX (mode);
1989 /* Change subtraction from zero into negation. (0 - x) is the
1990 same as -x when x is NaN, infinite, or finite and nonzero.
1991 But if the mode has signed zeros, and does not round towards
1992 -infinity, then 0 - 0 is 0, not -0. */
1993 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1994 return simplify_gen_unary (NEG, mode, op1, mode);
1996 /* (-1 - a) is ~a. */
1997 if (trueop0 == constm1_rtx)
1998 return simplify_gen_unary (NOT, mode, op1, mode);
2000 /* Subtracting 0 has no effect unless the mode has signed zeros
2001 and supports rounding towards -infinity. In such a case,
2002 0 - 0 is -0. */
2003 if (!(HONOR_SIGNED_ZEROS (mode)
2004 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2005 && trueop1 == CONST0_RTX (mode))
2006 return op0;
2008 /* See if this is something like X * C - X or vice versa or
2009 if the multiplication is written as a shift. If so, we can
2010 distribute and make a new multiply, shift, or maybe just
2011 have X (if C is 2 in the example above). But don't make
2012 something more expensive than we had before. */
2014 if (SCALAR_INT_MODE_P (mode))
2016 double_int coeff0, negcoeff1;
2017 rtx lhs = op0, rhs = op1;
2019 coeff0 = double_int_one;
2020 negcoeff1 = double_int_minus_one;
2022 if (GET_CODE (lhs) == NEG)
2024 coeff0 = double_int_minus_one;
2025 lhs = XEXP (lhs, 0);
2027 else if (GET_CODE (lhs) == MULT
2028 && CONST_INT_P (XEXP (lhs, 1)))
2030 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2031 lhs = XEXP (lhs, 0);
2033 else if (GET_CODE (lhs) == ASHIFT
2034 && CONST_INT_P (XEXP (lhs, 1))
2035 && INTVAL (XEXP (lhs, 1)) >= 0
2036 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2038 coeff0 = double_int_setbit (double_int_zero,
2039 INTVAL (XEXP (lhs, 1)));
2040 lhs = XEXP (lhs, 0);
2043 if (GET_CODE (rhs) == NEG)
2045 negcoeff1 = double_int_one;
2046 rhs = XEXP (rhs, 0);
2048 else if (GET_CODE (rhs) == MULT
2049 && CONST_INT_P (XEXP (rhs, 1)))
2051 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2052 rhs = XEXP (rhs, 0);
2054 else if (GET_CODE (rhs) == ASHIFT
2055 && CONST_INT_P (XEXP (rhs, 1))
2056 && INTVAL (XEXP (rhs, 1)) >= 0
2057 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2059 negcoeff1 = double_int_setbit (double_int_zero,
2060 INTVAL (XEXP (rhs, 1)));
2061 negcoeff1 = double_int_neg (negcoeff1);
2062 rhs = XEXP (rhs, 0);
2065 if (rtx_equal_p (lhs, rhs))
2067 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2068 rtx coeff;
2069 double_int val;
2070 bool speed = optimize_function_for_speed_p (cfun);
2072 val = double_int_add (coeff0, negcoeff1);
2073 coeff = immed_double_int_const (val, mode);
2075 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2076 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2077 ? tem : 0;
2081 /* (a - (-b)) -> (a + b). True even for IEEE. */
2082 if (GET_CODE (op1) == NEG)
2083 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2085 /* (-x - c) may be simplified as (-c - x). */
2086 if (GET_CODE (op0) == NEG
2087 && (CONST_INT_P (op1)
2088 || GET_CODE (op1) == CONST_DOUBLE))
2090 tem = simplify_unary_operation (NEG, mode, op1, mode);
2091 if (tem)
2092 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2095 /* Don't let a relocatable value get a negative coeff. */
2096 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2097 return simplify_gen_binary (PLUS, mode,
2098 op0,
2099 neg_const_int (mode, op1));
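/* For example, (minus (symbol_ref X) (const_int 16)) becomes
(plus (symbol_ref X) (const_int -16)).  */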
2101 /* (x - (x & y)) -> (x & ~y) */
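/* Bitwise justification: (x & y) only contains bits that are set in x,
so the subtraction borrows nothing and simply clears those bits,
leaving x & ~y.  */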
2102 if (GET_CODE (op1) == AND)
2104 if (rtx_equal_p (op0, XEXP (op1, 0)))
2106 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2107 GET_MODE (XEXP (op1, 1)));
2108 return simplify_gen_binary (AND, mode, op0, tem);
2110 if (rtx_equal_p (op0, XEXP (op1, 1)))
2112 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2113 GET_MODE (XEXP (op1, 0)));
2114 return simplify_gen_binary (AND, mode, op0, tem);
2118 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2119 by reversing the comparison code if valid. */
2120 if (STORE_FLAG_VALUE == 1
2121 && trueop0 == const1_rtx
2122 && COMPARISON_P (op1)
2123 && (reversed = reversed_comparison (op1, mode)))
2124 return reversed;
2126 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2127 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2128 && GET_CODE (op1) == MULT
2129 && GET_CODE (XEXP (op1, 0)) == NEG)
2131 rtx in1, in2;
2133 in1 = XEXP (XEXP (op1, 0), 0);
2134 in2 = XEXP (op1, 1);
2135 return simplify_gen_binary (PLUS, mode,
2136 simplify_gen_binary (MULT, mode,
2137 in1, in2),
2138 op0);
2141 /* Canonicalize (minus (neg A) (mult B C)) to
2142 (minus (mult (neg B) C) A). */
2143 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2144 && GET_CODE (op1) == MULT
2145 && GET_CODE (op0) == NEG)
2147 rtx in1, in2;
2149 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2150 in2 = XEXP (op1, 1);
2151 return simplify_gen_binary (MINUS, mode,
2152 simplify_gen_binary (MULT, mode,
2153 in1, in2),
2154 XEXP (op0, 0));
2157 /* If one of the operands is a PLUS or a MINUS, see if we can
2158 simplify this by the associative law. This will, for example,
2159 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2160 Don't use the associative law for floating point.
2161 The inaccuracy makes it nonassociative,
2162 and subtle programs can break if operations are associated. */
2164 if (INTEGRAL_MODE_P (mode)
2165 && (plus_minus_operand_p (op0)
2166 || plus_minus_operand_p (op1))
2167 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2168 return tem;
2169 break;
2171 case MULT:
2172 if (trueop1 == constm1_rtx)
2173 return simplify_gen_unary (NEG, mode, op0, mode);
2175 if (GET_CODE (op0) == NEG)
2177 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2178 if (temp)
2179 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2181 if (GET_CODE (op1) == NEG)
2183 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2184 if (temp)
2185 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2188 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2189 x is NaN, since x * 0 is then also NaN. Nor is it valid
2190 when the mode has signed zeros, since multiplying a negative
2191 number by 0 will give -0, not 0. */
2192 if (!HONOR_NANS (mode)
2193 && !HONOR_SIGNED_ZEROS (mode)
2194 && trueop1 == CONST0_RTX (mode)
2195 && ! side_effects_p (op0))
2196 return op1;
2198 /* In IEEE floating point, x*1 is not equivalent to x for
2199 signalling NaNs. */
2200 if (!HONOR_SNANS (mode)
2201 && trueop1 == CONST1_RTX (mode))
2202 return op0;
2204 /* Convert multiply by constant power of two into shift unless
2205 we are still generating RTL. This test is a kludge. */
2206 if (CONST_INT_P (trueop1)
2207 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2208 /* If the mode is larger than the host word size, and the
2209 uppermost bit is set, then this isn't a power of two due
2210 to implicit sign extension. */
2211 && (width <= HOST_BITS_PER_WIDE_INT
2212 || val != HOST_BITS_PER_WIDE_INT - 1))
2213 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2215 /* Likewise for multipliers wider than a word. */
2216 if (GET_CODE (trueop1) == CONST_DOUBLE
2217 && (GET_MODE (trueop1) == VOIDmode
2218 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2219 && GET_MODE (op0) == mode
2220 && CONST_DOUBLE_LOW (trueop1) == 0
2221 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2222 return simplify_gen_binary (ASHIFT, mode, op0,
2223 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2225 /* x*2 is x+x and x*(-1) is -x */
2226 if (GET_CODE (trueop1) == CONST_DOUBLE
2227 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2228 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2229 && GET_MODE (op0) == mode)
2231 REAL_VALUE_TYPE d;
2232 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2234 if (REAL_VALUES_EQUAL (d, dconst2))
2235 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2237 if (!HONOR_SNANS (mode)
2238 && REAL_VALUES_EQUAL (d, dconstm1))
2239 return simplify_gen_unary (NEG, mode, op0, mode);
2242 /* Optimize -x * -x as x * x. */
2243 if (FLOAT_MODE_P (mode)
2244 && GET_CODE (op0) == NEG
2245 && GET_CODE (op1) == NEG
2246 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2247 && !side_effects_p (XEXP (op0, 0)))
2248 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2250 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2251 if (SCALAR_FLOAT_MODE_P (mode)
2252 && GET_CODE (op0) == ABS
2253 && GET_CODE (op1) == ABS
2254 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2255 && !side_effects_p (XEXP (op0, 0)))
2256 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2258 /* Reassociate multiplication, but for floating point MULTs
2259 only when the user specifies unsafe math optimizations. */
2260 if (! FLOAT_MODE_P (mode)
2261 || flag_unsafe_math_optimizations)
2263 tem = simplify_associative_operation (code, mode, op0, op1);
2264 if (tem)
2265 return tem;
2267 break;
2269 case IOR:
2270 if (trueop1 == CONST0_RTX (mode))
2271 return op0;
2272 if (CONST_INT_P (trueop1)
2273 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2274 == GET_MODE_MASK (mode)))
2275 return op1;
2276 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2277 return op0;
2278 /* A | (~A) -> -1 */
2279 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2280 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2281 && ! side_effects_p (op0)
2282 && SCALAR_INT_MODE_P (mode))
2283 return constm1_rtx;
2285 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2286 if (CONST_INT_P (op1)
2287 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2288 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2289 return op1;
2291 /* Canonicalize (X & C1) | C2. */
2292 if (GET_CODE (op0) == AND
2293 && CONST_INT_P (trueop1)
2294 && CONST_INT_P (XEXP (op0, 1)))
2296 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2297 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2298 HOST_WIDE_INT c2 = INTVAL (trueop1);
2300 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2301 if ((c1 & c2) == c1
2302 && !side_effects_p (XEXP (op0, 0)))
2303 return trueop1;
2305 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2306 if (((c1|c2) & mask) == mask)
2307 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2309 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2310 if (((c1 & ~c2) & mask) != (c1 & mask))
2312 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2313 gen_int_mode (c1 & ~c2, mode));
2314 return simplify_gen_binary (IOR, mode, tem, op1);
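/* For example, (x & 0x0f) | 0x06 becomes (x & 0x09) | 0x06: the bits
of C1 that are already forced on by C2 are dropped from the AND mask
without changing the result.  */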
2318 /* Convert (A & B) | A to A. */
2319 if (GET_CODE (op0) == AND
2320 && (rtx_equal_p (XEXP (op0, 0), op1)
2321 || rtx_equal_p (XEXP (op0, 1), op1))
2322 && ! side_effects_p (XEXP (op0, 0))
2323 && ! side_effects_p (XEXP (op0, 1)))
2324 return op1;
2326 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2327 mode size to (rotate A CX). */
2329 if (GET_CODE (op1) == ASHIFT
2330 || GET_CODE (op1) == SUBREG)
2332 opleft = op1;
2333 opright = op0;
2335 else
2337 opright = op1;
2338 opleft = op0;
2341 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2342 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2343 && CONST_INT_P (XEXP (opleft, 1))
2344 && CONST_INT_P (XEXP (opright, 1))
2345 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2346 == GET_MODE_BITSIZE (mode)))
2347 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
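/* For example, in SImode (ior (ashift x 3) (lshiftrt x 29)) becomes
(rotate x 3), since 3 + 29 equals the 32-bit mode size.  */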
2349 /* Same, but for ashift that has been "simplified" to a wider mode
2350 by simplify_shift_const. */
2352 if (GET_CODE (opleft) == SUBREG
2353 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2354 && GET_CODE (opright) == LSHIFTRT
2355 && GET_CODE (XEXP (opright, 0)) == SUBREG
2356 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2357 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2358 && (GET_MODE_SIZE (GET_MODE (opleft))
2359 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2360 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2361 SUBREG_REG (XEXP (opright, 0)))
2362 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2363 && CONST_INT_P (XEXP (opright, 1))
2364 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2365 == GET_MODE_BITSIZE (mode)))
2366 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2367 XEXP (SUBREG_REG (opleft), 1));
2369 /* If we have (ior (and X C1) C2), simplify this by making
2370 C1 as small as possible if C1 actually changes. */
2371 if (CONST_INT_P (op1)
2372 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2373 || INTVAL (op1) > 0)
2374 && GET_CODE (op0) == AND
2375 && CONST_INT_P (XEXP (op0, 1))
2376 && CONST_INT_P (op1)
2377 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2378 return simplify_gen_binary (IOR, mode,
2379 simplify_gen_binary
2380 (AND, mode, XEXP (op0, 0),
2381 GEN_INT (UINTVAL (XEXP (op0, 1))
2382 & ~UINTVAL (op1))),
2383 op1);
2385 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2386 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the PLUS
2387 does not affect any of the bits in OP1, then we can do the
2388 IOR as a PLUS and we can associate. This is valid if OP1
2389 can be safely shifted left C bits. */
2390 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2391 && GET_CODE (XEXP (op0, 0)) == PLUS
2392 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2393 && CONST_INT_P (XEXP (op0, 1))
2394 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2396 int count = INTVAL (XEXP (op0, 1));
2397 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2399 if (mask >> count == INTVAL (trueop1)
2400 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2401 return simplify_gen_binary (ASHIFTRT, mode,
2402 plus_constant (XEXP (op0, 0), mask),
2403 XEXP (op0, 1));
2406 tem = simplify_associative_operation (code, mode, op0, op1);
2407 if (tem)
2408 return tem;
2409 break;
2411 case XOR:
2412 if (trueop1 == CONST0_RTX (mode))
2413 return op0;
2414 if (CONST_INT_P (trueop1)
2415 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2416 == GET_MODE_MASK (mode)))
2417 return simplify_gen_unary (NOT, mode, op0, mode);
2418 if (rtx_equal_p (trueop0, trueop1)
2419 && ! side_effects_p (op0)
2420 && GET_MODE_CLASS (mode) != MODE_CC)
2421 return CONST0_RTX (mode);
2423 /* Canonicalize XOR of the most significant bit to PLUS. */
2424 if ((CONST_INT_P (op1)
2425 || GET_CODE (op1) == CONST_DOUBLE)
2426 && mode_signbit_p (mode, op1))
2427 return simplify_gen_binary (PLUS, mode, op0, op1);
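/* For example, in QImode (xor x 0x80) is canonicalized to
(plus x 0x80); flipping the sign bit and adding the sign bit are the
same operation modulo 2^8.  */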
2428 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2429 if ((CONST_INT_P (op1)
2430 || GET_CODE (op1) == CONST_DOUBLE)
2431 && GET_CODE (op0) == PLUS
2432 && (CONST_INT_P (XEXP (op0, 1))
2433 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2434 && mode_signbit_p (mode, XEXP (op0, 1)))
2435 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2436 simplify_gen_binary (XOR, mode, op1,
2437 XEXP (op0, 1)));
2439 /* If we are XORing two things that have no bits in common,
2440 convert them into an IOR. This helps to detect rotation encoded
2441 using those methods and possibly other simplifications. */
2443 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2444 && (nonzero_bits (op0, mode)
2445 & nonzero_bits (op1, mode)) == 0)
2446 return (simplify_gen_binary (IOR, mode, op0, op1));
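/* When nonzero_bits shows the two operands can never both have a 1 in
the same position, XOR, IOR and PLUS all compute the same value, so
the IOR form is used as the canonical one.  */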
2448 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2449 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2450 (NOT y). */
2452 int num_negated = 0;
2454 if (GET_CODE (op0) == NOT)
2455 num_negated++, op0 = XEXP (op0, 0);
2456 if (GET_CODE (op1) == NOT)
2457 num_negated++, op1 = XEXP (op1, 0);
2459 if (num_negated == 2)
2460 return simplify_gen_binary (XOR, mode, op0, op1);
2461 else if (num_negated == 1)
2462 return simplify_gen_unary (NOT, mode,
2463 simplify_gen_binary (XOR, mode, op0, op1),
2464 mode);
2467 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2468 correspond to a machine insn or result in further simplifications
2469 if B is a constant. */
2471 if (GET_CODE (op0) == AND
2472 && rtx_equal_p (XEXP (op0, 1), op1)
2473 && ! side_effects_p (op1))
2474 return simplify_gen_binary (AND, mode,
2475 simplify_gen_unary (NOT, mode,
2476 XEXP (op0, 0), mode),
2477 op1);
2479 else if (GET_CODE (op0) == AND
2480 && rtx_equal_p (XEXP (op0, 0), op1)
2481 && ! side_effects_p (op1))
2482 return simplify_gen_binary (AND, mode,
2483 simplify_gen_unary (NOT, mode,
2484 XEXP (op0, 1), mode),
2485 op1);
2487 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2488 we can transform like this:
2489 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2490 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2491 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2492 Attempt a few simplifications when B and C are both constants. */
2493 if (GET_CODE (op0) == AND
2494 && CONST_INT_P (op1)
2495 && CONST_INT_P (XEXP (op0, 1)))
2497 rtx a = XEXP (op0, 0);
2498 rtx b = XEXP (op0, 1);
2499 rtx c = op1;
2500 HOST_WIDE_INT bval = INTVAL (b);
2501 HOST_WIDE_INT cval = INTVAL (c);
2503 rtx na_c
2504 = simplify_binary_operation (AND, mode,
2505 simplify_gen_unary (NOT, mode, a, mode),
2506 c);
2507 if ((~cval & bval) == 0)
2509 /* Try to simplify ~A&C | ~B&C. */
2510 if (na_c != NULL_RTX)
2511 return simplify_gen_binary (IOR, mode, na_c,
2512 GEN_INT (~bval & cval));
2514 else
2516 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2517 if (na_c == const0_rtx)
2519 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2520 GEN_INT (~cval & bval));
2521 return simplify_gen_binary (IOR, mode, a_nc_b,
2522 GEN_INT (~bval & cval));
2527 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2528 comparison if STORE_FLAG_VALUE is 1. */
2529 if (STORE_FLAG_VALUE == 1
2530 && trueop1 == const1_rtx
2531 && COMPARISON_P (op0)
2532 && (reversed = reversed_comparison (op0, mode)))
2533 return reversed;
2535 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2536 is (lt foo (const_int 0)), so we can perform the above
2537 simplification if STORE_FLAG_VALUE is 1. */
2539 if (STORE_FLAG_VALUE == 1
2540 && trueop1 == const1_rtx
2541 && GET_CODE (op0) == LSHIFTRT
2542 && CONST_INT_P (XEXP (op0, 1))
2543 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2544 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
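/* For example, in SImode (xor (lshiftrt x 31) 1) tests the inverted
sign bit, which is (ge x 0) when STORE_FLAG_VALUE is 1.  */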
2546 /* (xor (comparison foo bar) (const_int sign-bit))
2547 when STORE_FLAG_VALUE is the sign bit. */
2548 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2549 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2550 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2551 && trueop1 == const_true_rtx
2552 && COMPARISON_P (op0)
2553 && (reversed = reversed_comparison (op0, mode)))
2554 return reversed;
2556 tem = simplify_associative_operation (code, mode, op0, op1);
2557 if (tem)
2558 return tem;
2559 break;
2561 case AND:
2562 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2563 return trueop1;
2564 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2566 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2567 HOST_WIDE_INT nzop1;
2568 if (CONST_INT_P (trueop1))
2570 HOST_WIDE_INT val1 = INTVAL (trueop1);
2571 /* If we are turning off bits already known off in OP0, we need
2572 not do an AND. */
2573 if ((nzop0 & ~val1) == 0)
2574 return op0;
2576 nzop1 = nonzero_bits (trueop1, mode);
2577 /* If we are clearing all the nonzero bits, the result is zero. */
2578 if ((nzop1 & nzop0) == 0
2579 && !side_effects_p (op0) && !side_effects_p (op1))
2580 return CONST0_RTX (mode);
2582 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2583 && GET_MODE_CLASS (mode) != MODE_CC)
2584 return op0;
2585 /* A & (~A) -> 0 */
2586 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2587 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2588 && ! side_effects_p (op0)
2589 && GET_MODE_CLASS (mode) != MODE_CC)
2590 return CONST0_RTX (mode);
2592 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2593 there are no nonzero bits of C outside of X's mode. */
2594 if ((GET_CODE (op0) == SIGN_EXTEND
2595 || GET_CODE (op0) == ZERO_EXTEND)
2596 && CONST_INT_P (trueop1)
2597 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2598 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2599 & UINTVAL (trueop1)) == 0)
2601 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2602 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2603 gen_int_mode (INTVAL (trueop1),
2604 imode));
2605 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2608 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2609 we might be able to further simplify the AND with X and potentially
2610 remove the truncation altogether. */
2611 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2613 rtx x = XEXP (op0, 0);
2614 enum machine_mode xmode = GET_MODE (x);
2615 tem = simplify_gen_binary (AND, xmode, x,
2616 gen_int_mode (INTVAL (trueop1), xmode));
2617 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2620 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2621 if (GET_CODE (op0) == IOR
2622 && CONST_INT_P (trueop1)
2623 && CONST_INT_P (XEXP (op0, 1)))
2625 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2626 return simplify_gen_binary (IOR, mode,
2627 simplify_gen_binary (AND, mode,
2628 XEXP (op0, 0), op1),
2629 gen_int_mode (tmp, mode));
2632 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2633 insn (and may simplify more). */
2634 if (GET_CODE (op0) == XOR
2635 && rtx_equal_p (XEXP (op0, 0), op1)
2636 && ! side_effects_p (op1))
2637 return simplify_gen_binary (AND, mode,
2638 simplify_gen_unary (NOT, mode,
2639 XEXP (op0, 1), mode),
2640 op1);
2642 if (GET_CODE (op0) == XOR
2643 && rtx_equal_p (XEXP (op0, 1), op1)
2644 && ! side_effects_p (op1))
2645 return simplify_gen_binary (AND, mode,
2646 simplify_gen_unary (NOT, mode,
2647 XEXP (op0, 0), mode),
2648 op1);
2650 /* Similarly for (~(A ^ B)) & A. */
2651 if (GET_CODE (op0) == NOT
2652 && GET_CODE (XEXP (op0, 0)) == XOR
2653 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2654 && ! side_effects_p (op1))
2655 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2657 if (GET_CODE (op0) == NOT
2658 && GET_CODE (XEXP (op0, 0)) == XOR
2659 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2660 && ! side_effects_p (op1))
2661 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2663 /* Convert (A | B) & A to A. */
2664 if (GET_CODE (op0) == IOR
2665 && (rtx_equal_p (XEXP (op0, 0), op1)
2666 || rtx_equal_p (XEXP (op0, 1), op1))
2667 && ! side_effects_p (XEXP (op0, 0))
2668 && ! side_effects_p (XEXP (op0, 1)))
2669 return op1;
2671 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2672 ((A & N) + B) & M -> (A + B) & M
2673 Similarly if (N & M) == 0,
2674 ((A | N) + B) & M -> (A + B) & M
2675 and for - instead of + and/or ^ instead of |.
2676 Also, if (N & M) == 0, then
2677 (A +- N) & M -> A & M. */
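/* For example, ((a & 0xff) + b) & 0x0f becomes (a + b) & 0x0f: masking
A only changes bits above the low nibble, and those bits cannot
influence the low nibble of the sum.  */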
2678 if (CONST_INT_P (trueop1)
2679 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2680 && ~UINTVAL (trueop1)
2681 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2682 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2684 rtx pmop[2];
2685 int which;
2687 pmop[0] = XEXP (op0, 0);
2688 pmop[1] = XEXP (op0, 1);
2690 if (CONST_INT_P (pmop[1])
2691 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2692 return simplify_gen_binary (AND, mode, pmop[0], op1);
2694 for (which = 0; which < 2; which++)
2696 tem = pmop[which];
2697 switch (GET_CODE (tem))
2699 case AND:
2700 if (CONST_INT_P (XEXP (tem, 1))
2701 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2702 == UINTVAL (trueop1))
2703 pmop[which] = XEXP (tem, 0);
2704 break;
2705 case IOR:
2706 case XOR:
2707 if (CONST_INT_P (XEXP (tem, 1))
2708 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2709 pmop[which] = XEXP (tem, 0);
2710 break;
2711 default:
2712 break;
2716 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2718 tem = simplify_gen_binary (GET_CODE (op0), mode,
2719 pmop[0], pmop[1]);
2720 return simplify_gen_binary (code, mode, tem, op1);
2724 /* (and X (ior (not X) Y)) -> (and X Y) */
2725 if (GET_CODE (op1) == IOR
2726 && GET_CODE (XEXP (op1, 0)) == NOT
2727 && op0 == XEXP (XEXP (op1, 0), 0))
2728 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2730 /* (and (ior (not X) Y) X) -> (and X Y) */
2731 if (GET_CODE (op0) == IOR
2732 && GET_CODE (XEXP (op0, 0)) == NOT
2733 && op1 == XEXP (XEXP (op0, 0), 0))
2734 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2736 tem = simplify_associative_operation (code, mode, op0, op1);
2737 if (tem)
2738 return tem;
2739 break;
2741 case UDIV:
2742 /* 0/x is 0 (or x&0 if x has side-effects). */
2743 if (trueop0 == CONST0_RTX (mode))
2745 if (side_effects_p (op1))
2746 return simplify_gen_binary (AND, mode, op1, trueop0);
2747 return trueop0;
2749 /* x/1 is x. */
2750 if (trueop1 == CONST1_RTX (mode))
2751 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2752 /* Convert divide by power of two into shift. */
2753 if (CONST_INT_P (trueop1)
2754 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2755 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
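/* For example, the unsigned division x / 16 becomes the logical shift
x >> 4.  */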
2756 break;
2758 case DIV:
2759 /* Handle floating point and integers separately. */
2760 if (SCALAR_FLOAT_MODE_P (mode))
2762 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2763 safe for modes with NaNs, since 0.0 / 0.0 will then be
2764 NaN rather than 0.0. Nor is it safe for modes with signed
2765 zeros, since dividing 0 by a negative number gives -0.0 */
2766 if (trueop0 == CONST0_RTX (mode)
2767 && !HONOR_NANS (mode)
2768 && !HONOR_SIGNED_ZEROS (mode)
2769 && ! side_effects_p (op1))
2770 return op0;
2771 /* x/1.0 is x. */
2772 if (trueop1 == CONST1_RTX (mode)
2773 && !HONOR_SNANS (mode))
2774 return op0;
2776 if (GET_CODE (trueop1) == CONST_DOUBLE
2777 && trueop1 != CONST0_RTX (mode))
2779 REAL_VALUE_TYPE d;
2780 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2782 /* x/-1.0 is -x. */
2783 if (REAL_VALUES_EQUAL (d, dconstm1)
2784 && !HONOR_SNANS (mode))
2785 return simplify_gen_unary (NEG, mode, op0, mode);
2787 /* Change FP division by a constant into multiplication.
2788 Only do this with -freciprocal-math. */
2789 if (flag_reciprocal_math
2790 && !REAL_VALUES_EQUAL (d, dconst0))
2792 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2793 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2794 return simplify_gen_binary (MULT, mode, op0, tem);
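/* For example, with -freciprocal-math x / 4.0 becomes x * 0.25; the
reciprocal is computed once at compile time.  */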
2798 else
2800 /* 0/x is 0 (or x&0 if x has side-effects). */
2801 if (trueop0 == CONST0_RTX (mode)
2802 && !cfun->can_throw_non_call_exceptions)
2804 if (side_effects_p (op1))
2805 return simplify_gen_binary (AND, mode, op1, trueop0);
2806 return trueop0;
2808 /* x/1 is x. */
2809 if (trueop1 == CONST1_RTX (mode))
2810 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2811 /* x/-1 is -x. */
2812 if (trueop1 == constm1_rtx)
2814 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2815 return simplify_gen_unary (NEG, mode, x, mode);
2818 break;
2820 case UMOD:
2821 /* 0%x is 0 (or x&0 if x has side-effects). */
2822 if (trueop0 == CONST0_RTX (mode))
2824 if (side_effects_p (op1))
2825 return simplify_gen_binary (AND, mode, op1, trueop0);
2826 return trueop0;
2828 /* x%1 is 0 (or x&0 if x has side-effects). */
2829 if (trueop1 == CONST1_RTX (mode))
2831 if (side_effects_p (op0))
2832 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2833 return CONST0_RTX (mode);
2835 /* Implement modulus by power of two as AND. */
2836 if (CONST_INT_P (trueop1)
2837 && exact_log2 (UINTVAL (trueop1)) > 0)
2838 return simplify_gen_binary (AND, mode, op0,
2839 GEN_INT (INTVAL (op1) - 1));
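/* For example, the unsigned modulus x % 16 becomes x & 15.  */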
2840 break;
2842 case MOD:
2843 /* 0%x is 0 (or x&0 if x has side-effects). */
2844 if (trueop0 == CONST0_RTX (mode))
2846 if (side_effects_p (op1))
2847 return simplify_gen_binary (AND, mode, op1, trueop0);
2848 return trueop0;
2850 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2851 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2853 if (side_effects_p (op0))
2854 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2855 return CONST0_RTX (mode);
2857 break;
2859 case ROTATERT:
2860 case ROTATE:
2861 case ASHIFTRT:
2862 if (trueop1 == CONST0_RTX (mode))
2863 return op0;
2864 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2865 return op0;
2866 /* Rotating ~0 always results in ~0. */
2867 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2868 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2869 && ! side_effects_p (op1))
2870 return op0;
2871 canonicalize_shift:
2872 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2874 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2875 if (val != INTVAL (op1))
2876 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
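/* For example, with SHIFT_COUNT_TRUNCATED a 32-bit shift by 37 is
canonicalized to a shift by 5, matching what the target hardware
would do with the count.  */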
2878 break;
2880 case ASHIFT:
2881 case SS_ASHIFT:
2882 case US_ASHIFT:
2883 if (trueop1 == CONST0_RTX (mode))
2884 return op0;
2885 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2886 return op0;
2887 goto canonicalize_shift;
2889 case LSHIFTRT:
2890 if (trueop1 == CONST0_RTX (mode))
2891 return op0;
2892 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2893 return op0;
2894 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2895 if (GET_CODE (op0) == CLZ
2896 && CONST_INT_P (trueop1)
2897 && STORE_FLAG_VALUE == 1
2898 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2900 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2901 unsigned HOST_WIDE_INT zero_val = 0;
2903 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2904 && zero_val == GET_MODE_BITSIZE (imode)
2905 && INTVAL (trueop1) == exact_log2 (zero_val))
2906 return simplify_gen_relational (EQ, mode, imode,
2907 XEXP (op0, 0), const0_rtx);
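/* For example, if CLZ of zero is defined as the 32-bit mode width,
(lshiftrt (clz x) 5) is 1 exactly when x is 0, so it simplifies to
(eq x 0).  */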
2909 goto canonicalize_shift;
2911 case SMIN:
2912 if (width <= HOST_BITS_PER_WIDE_INT
2913 && CONST_INT_P (trueop1)
2914 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
2915 && ! side_effects_p (op0))
2916 return op1;
2917 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2918 return op0;
2919 tem = simplify_associative_operation (code, mode, op0, op1);
2920 if (tem)
2921 return tem;
2922 break;
2924 case SMAX:
2925 if (width <= HOST_BITS_PER_WIDE_INT
2926 && CONST_INT_P (trueop1)
2927 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2928 && ! side_effects_p (op0))
2929 return op1;
2930 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2931 return op0;
2932 tem = simplify_associative_operation (code, mode, op0, op1);
2933 if (tem)
2934 return tem;
2935 break;
2937 case UMIN:
2938 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2939 return op1;
2940 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2941 return op0;
2942 tem = simplify_associative_operation (code, mode, op0, op1);
2943 if (tem)
2944 return tem;
2945 break;
2947 case UMAX:
2948 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2949 return op1;
2950 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2951 return op0;
2952 tem = simplify_associative_operation (code, mode, op0, op1);
2953 if (tem)
2954 return tem;
2955 break;
2957 case SS_PLUS:
2958 case US_PLUS:
2959 case SS_MINUS:
2960 case US_MINUS:
2961 case SS_MULT:
2962 case US_MULT:
2963 case SS_DIV:
2964 case US_DIV:
2965 /* ??? There are simplifications that can be done. */
2966 return 0;
2968 case VEC_SELECT:
2969 if (!VECTOR_MODE_P (mode))
2971 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2972 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2973 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2974 gcc_assert (XVECLEN (trueop1, 0) == 1);
2975 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2977 if (GET_CODE (trueop0) == CONST_VECTOR)
2978 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2979 (trueop1, 0, 0)));
2981 /* Extract a scalar element from a nested VEC_SELECT expression
2982 (with optional nested VEC_CONCAT expression). Some targets
2983 (i386) extract a scalar element from a vector using a chain of
2984 nested VEC_SELECT expressions. When the input operand is a
2985 memory operand, this operation can be simplified to a simple
2986 scalar load from an offset memory address. */
2987 if (GET_CODE (trueop0) == VEC_SELECT)
2989 rtx op0 = XEXP (trueop0, 0);
2990 rtx op1 = XEXP (trueop0, 1);
2992 enum machine_mode opmode = GET_MODE (op0);
2993 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2994 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2996 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2997 int elem;
2999 rtvec vec;
3000 rtx tmp_op, tmp;
3002 gcc_assert (GET_CODE (op1) == PARALLEL);
3003 gcc_assert (i < n_elts);
3006 /* Select the element pointed to by the nested selector. */
3006 elem = INTVAL (XVECEXP (op1, 0, i));
3008 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3009 if (GET_CODE (op0) == VEC_CONCAT)
3011 rtx op00 = XEXP (op0, 0);
3012 rtx op01 = XEXP (op0, 1);
3014 enum machine_mode mode00, mode01;
3015 int n_elts00, n_elts01;
3017 mode00 = GET_MODE (op00);
3018 mode01 = GET_MODE (op01);
3020 /* Find out number of elements of each operand. */
3021 if (VECTOR_MODE_P (mode00))
3023 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3024 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3026 else
3027 n_elts00 = 1;
3029 if (VECTOR_MODE_P (mode01))
3031 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3032 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3034 else
3035 n_elts01 = 1;
3037 gcc_assert (n_elts == n_elts00 + n_elts01);
3039 /* Select correct operand of VEC_CONCAT
3040 and adjust selector. */
3041 if (elem < n_elts00)
3042 tmp_op = op00;
3043 else
3045 tmp_op = op01;
3046 elem -= n_elts00;
3049 else
3050 tmp_op = op0;
3052 vec = rtvec_alloc (1);
3053 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3055 tmp = gen_rtx_fmt_ee (code, mode,
3056 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3057 return tmp;
3059 if (GET_CODE (trueop0) == VEC_DUPLICATE
3060 && GET_MODE (XEXP (trueop0, 0)) == mode)
3061 return XEXP (trueop0, 0);
3063 else
3065 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3066 gcc_assert (GET_MODE_INNER (mode)
3067 == GET_MODE_INNER (GET_MODE (trueop0)));
3068 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3070 if (GET_CODE (trueop0) == CONST_VECTOR)
3072 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3073 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3074 rtvec v = rtvec_alloc (n_elts);
3075 unsigned int i;
3077 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3078 for (i = 0; i < n_elts; i++)
3080 rtx x = XVECEXP (trueop1, 0, i);
3082 gcc_assert (CONST_INT_P (x));
3083 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3084 INTVAL (x));
3087 return gen_rtx_CONST_VECTOR (mode, v);
3091 if (XVECLEN (trueop1, 0) == 1
3092 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3093 && GET_CODE (trueop0) == VEC_CONCAT)
3095 rtx vec = trueop0;
3096 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3098 /* Try to find the element in the VEC_CONCAT. */
3099 while (GET_MODE (vec) != mode
3100 && GET_CODE (vec) == VEC_CONCAT)
3102 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3103 if (offset < vec_size)
3104 vec = XEXP (vec, 0);
3105 else
3107 offset -= vec_size;
3108 vec = XEXP (vec, 1);
3110 vec = avoid_constant_pool_reference (vec);
3113 if (GET_MODE (vec) == mode)
3114 return vec;
3117 return 0;
3118 case VEC_CONCAT:
3120 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3121 ? GET_MODE (trueop0)
3122 : GET_MODE_INNER (mode));
3123 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3124 ? GET_MODE (trueop1)
3125 : GET_MODE_INNER (mode));
3127 gcc_assert (VECTOR_MODE_P (mode));
3128 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3129 == GET_MODE_SIZE (mode));
3131 if (VECTOR_MODE_P (op0_mode))
3132 gcc_assert (GET_MODE_INNER (mode)
3133 == GET_MODE_INNER (op0_mode));
3134 else
3135 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3137 if (VECTOR_MODE_P (op1_mode))
3138 gcc_assert (GET_MODE_INNER (mode)
3139 == GET_MODE_INNER (op1_mode));
3140 else
3141 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3143 if ((GET_CODE (trueop0) == CONST_VECTOR
3144 || CONST_INT_P (trueop0)
3145 || GET_CODE (trueop0) == CONST_DOUBLE)
3146 && (GET_CODE (trueop1) == CONST_VECTOR
3147 || CONST_INT_P (trueop1)
3148 || GET_CODE (trueop1) == CONST_DOUBLE))
3150 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3151 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3152 rtvec v = rtvec_alloc (n_elts);
3153 unsigned int i;
3154 unsigned in_n_elts = 1;
3156 if (VECTOR_MODE_P (op0_mode))
3157 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3158 for (i = 0; i < n_elts; i++)
3160 if (i < in_n_elts)
3162 if (!VECTOR_MODE_P (op0_mode))
3163 RTVEC_ELT (v, i) = trueop0;
3164 else
3165 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3167 else
3169 if (!VECTOR_MODE_P (op1_mode))
3170 RTVEC_ELT (v, i) = trueop1;
3171 else
3172 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3173 i - in_n_elts);
3177 return gen_rtx_CONST_VECTOR (mode, v);
3180 return 0;
3182 default:
3183 gcc_unreachable ();
3186 return 0;
3190 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3191 rtx op0, rtx op1)
3193 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3194 HOST_WIDE_INT val;
3195 unsigned int width = GET_MODE_BITSIZE (mode);
3197 if (VECTOR_MODE_P (mode)
3198 && code != VEC_CONCAT
3199 && GET_CODE (op0) == CONST_VECTOR
3200 && GET_CODE (op1) == CONST_VECTOR)
3202 unsigned n_elts = GET_MODE_NUNITS (mode);
3203 enum machine_mode op0mode = GET_MODE (op0);
3204 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3205 enum machine_mode op1mode = GET_MODE (op1);
3206 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3207 rtvec v = rtvec_alloc (n_elts);
3208 unsigned int i;
3210 gcc_assert (op0_n_elts == n_elts);
3211 gcc_assert (op1_n_elts == n_elts);
3212 for (i = 0; i < n_elts; i++)
3214 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3215 CONST_VECTOR_ELT (op0, i),
3216 CONST_VECTOR_ELT (op1, i));
3217 if (!x)
3218 return 0;
3219 RTVEC_ELT (v, i) = x;
3222 return gen_rtx_CONST_VECTOR (mode, v);
3225 if (VECTOR_MODE_P (mode)
3226 && code == VEC_CONCAT
3227 && (CONST_INT_P (op0)
3228 || GET_CODE (op0) == CONST_DOUBLE
3229 || GET_CODE (op0) == CONST_FIXED)
3230 && (CONST_INT_P (op1)
3231 || GET_CODE (op1) == CONST_DOUBLE
3232 || GET_CODE (op1) == CONST_FIXED))
3234 unsigned n_elts = GET_MODE_NUNITS (mode);
3235 rtvec v = rtvec_alloc (n_elts);
3237 gcc_assert (n_elts >= 2);
3238 if (n_elts == 2)
3240 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3241 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3243 RTVEC_ELT (v, 0) = op0;
3244 RTVEC_ELT (v, 1) = op1;
3246 else
3248 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3249 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3250 unsigned i;
3252 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3253 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3254 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3256 for (i = 0; i < op0_n_elts; ++i)
3257 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3258 for (i = 0; i < op1_n_elts; ++i)
3259 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3262 return gen_rtx_CONST_VECTOR (mode, v);
3265 if (SCALAR_FLOAT_MODE_P (mode)
3266 && GET_CODE (op0) == CONST_DOUBLE
3267 && GET_CODE (op1) == CONST_DOUBLE
3268 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3270 if (code == AND
3271 || code == IOR
3272 || code == XOR)
3274 long tmp0[4];
3275 long tmp1[4];
3276 REAL_VALUE_TYPE r;
3277 int i;
3279 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3280 GET_MODE (op0));
3281 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3282 GET_MODE (op1));
3283 for (i = 0; i < 4; i++)
3285 switch (code)
3287 case AND:
3288 tmp0[i] &= tmp1[i];
3289 break;
3290 case IOR:
3291 tmp0[i] |= tmp1[i];
3292 break;
3293 case XOR:
3294 tmp0[i] ^= tmp1[i];
3295 break;
3296 default:
3297 gcc_unreachable ();
3300 real_from_target (&r, tmp0, mode);
3301 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3303 else
3305 REAL_VALUE_TYPE f0, f1, value, result;
3306 bool inexact;
3308 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3309 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3310 real_convert (&f0, mode, &f0);
3311 real_convert (&f1, mode, &f1);
3313 if (HONOR_SNANS (mode)
3314 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3315 return 0;
3317 if (code == DIV
3318 && REAL_VALUES_EQUAL (f1, dconst0)
3319 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3320 return 0;
3322 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3323 && flag_trapping_math
3324 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3326 int s0 = REAL_VALUE_NEGATIVE (f0);
3327 int s1 = REAL_VALUE_NEGATIVE (f1);
3329 switch (code)
3331 case PLUS:
3332 /* Inf + -Inf = NaN plus exception. */
3333 if (s0 != s1)
3334 return 0;
3335 break;
3336 case MINUS:
3337 /* Inf - Inf = NaN plus exception. */
3338 if (s0 == s1)
3339 return 0;
3340 break;
3341 case DIV:
3342 /* Inf / Inf = NaN plus exception. */
3343 return 0;
3344 default:
3345 break;
3349 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3350 && flag_trapping_math
3351 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3352 || (REAL_VALUE_ISINF (f1)
3353 && REAL_VALUES_EQUAL (f0, dconst0))))
3354 /* Inf * 0 = NaN plus exception. */
3355 return 0;
3357 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3358 &f0, &f1);
3359 real_convert (&result, mode, &value);
3361 /* Don't constant fold this floating point operation if
3362 the result has overflowed and flag_trapping_math is set. */
3364 if (flag_trapping_math
3365 && MODE_HAS_INFINITIES (mode)
3366 && REAL_VALUE_ISINF (result)
3367 && !REAL_VALUE_ISINF (f0)
3368 && !REAL_VALUE_ISINF (f1))
3369 /* Overflow plus exception. */
3370 return 0;
3372 /* Don't constant fold this floating point operation if the
3373 result may be dependent upon the run-time rounding mode and
3374 flag_rounding_math is set, or if GCC's software emulation
3375 is unable to accurately represent the result. */
3377 if ((flag_rounding_math
3378 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3379 && (inexact || !real_identical (&result, &value)))
3380 return NULL_RTX;
3382 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3386 /* We can fold some multi-word operations. */
3387 if (GET_MODE_CLASS (mode) == MODE_INT
3388 && width == HOST_BITS_PER_DOUBLE_INT
3389 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3390 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3392 double_int o0, o1, res, tmp;
3394 o0 = rtx_to_double_int (op0);
3395 o1 = rtx_to_double_int (op1);
3397 switch (code)
3399 case MINUS:
3400 /* A - B == A + (-B). */
3401 o1 = double_int_neg (o1);
3403 /* Fall through.... */
3405 case PLUS:
3406 res = double_int_add (o0, o1);
3407 break;
3409 case MULT:
3410 res = double_int_mul (o0, o1);
3411 break;
3413 case DIV:
3414 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3415 o0.low, o0.high, o1.low, o1.high,
3416 &res.low, &res.high,
3417 &tmp.low, &tmp.high))
3418 return 0;
3419 break;
3421 case MOD:
3422 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3423 o0.low, o0.high, o1.low, o1.high,
3424 &tmp.low, &tmp.high,
3425 &res.low, &res.high))
3426 return 0;
3427 break;
3429 case UDIV:
3430 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3431 o0.low, o0.high, o1.low, o1.high,
3432 &res.low, &res.high,
3433 &tmp.low, &tmp.high))
3434 return 0;
3435 break;
3437 case UMOD:
3438 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3439 o0.low, o0.high, o1.low, o1.high,
3440 &tmp.low, &tmp.high,
3441 &res.low, &res.high))
3442 return 0;
3443 break;
3445 case AND:
3446 res = double_int_and (o0, o1);
3447 break;
3449 case IOR:
3450 res = double_int_ior (o0, o1);
3451 break;
3453 case XOR:
3454 res = double_int_xor (o0, o1);
3455 break;
3457 case SMIN:
3458 res = double_int_smin (o0, o1);
3459 break;
3461 case SMAX:
3462 res = double_int_smax (o0, o1);
3463 break;
3465 case UMIN:
3466 res = double_int_umin (o0, o1);
3467 break;
3469 case UMAX:
3470 res = double_int_umax (o0, o1);
3471 break;
3473 case LSHIFTRT: case ASHIFTRT:
3474 case ASHIFT:
3475 case ROTATE: case ROTATERT:
3477 unsigned HOST_WIDE_INT cnt;
3479 if (SHIFT_COUNT_TRUNCATED)
3480 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3482 if (!double_int_fits_in_uhwi_p (o1)
3483 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3484 return 0;
3486 cnt = double_int_to_uhwi (o1);
3488 if (code == LSHIFTRT || code == ASHIFTRT)
3489 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3490 code == ASHIFTRT);
3491 else if (code == ASHIFT)
3492 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3493 true);
3494 else if (code == ROTATE)
3495 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3496 else /* code == ROTATERT */
3497 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3499 break;
3501 default:
3502 return 0;
3505 return immed_double_int_const (res, mode);
3508 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3509 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3511 /* Get the integer argument values in two forms:
3512 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3514 arg0 = INTVAL (op0);
3515 arg1 = INTVAL (op1);
3517 if (width < HOST_BITS_PER_WIDE_INT)
3519 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3520 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3522 arg0s = arg0;
3523 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3524 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3526 arg1s = arg1;
3527 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3528 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3530 else
3532 arg0s = arg0;
3533 arg1s = arg1;
3536 /* Compute the value of the arithmetic. */
3538 switch (code)
3540 case PLUS:
3541 val = arg0s + arg1s;
3542 break;
3544 case MINUS:
3545 val = arg0s - arg1s;
3546 break;
3548 case MULT:
3549 val = arg0s * arg1s;
3550 break;
3552 case DIV:
3553 if (arg1s == 0
3554 || ((unsigned HOST_WIDE_INT) arg0s
3555 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3556 && arg1s == -1))
3557 return 0;
3558 val = arg0s / arg1s;
3559 break;
3561 case MOD:
3562 if (arg1s == 0
3563 || ((unsigned HOST_WIDE_INT) arg0s
3564 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3565 && arg1s == -1))
3566 return 0;
3567 val = arg0s % arg1s;
3568 break;
3570 case UDIV:
3571 if (arg1 == 0
3572 || ((unsigned HOST_WIDE_INT) arg0s
3573 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3574 && arg1s == -1))
3575 return 0;
3576 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3577 break;
3579 case UMOD:
3580 if (arg1 == 0
3581 || ((unsigned HOST_WIDE_INT) arg0s
3582 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3583 && arg1s == -1))
3584 return 0;
3585 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3586 break;
3588 case AND:
3589 val = arg0 & arg1;
3590 break;
3592 case IOR:
3593 val = arg0 | arg1;
3594 break;
3596 case XOR:
3597 val = arg0 ^ arg1;
3598 break;
3600 case LSHIFTRT:
3601 case ASHIFT:
3602 case ASHIFTRT:
3603 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3604 the value is in range. We can't return any old value for
3605 out-of-range arguments because either the middle-end (via
3606 shift_truncation_mask) or the back-end might be relying on
3607 target-specific knowledge. Nor can we rely on
3608 shift_truncation_mask, since the shift might not be part of an
3609 ashlM3, lshrM3 or ashrM3 instruction. */
3610 if (SHIFT_COUNT_TRUNCATED)
3611 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3612 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3613 return 0;
3615 val = (code == ASHIFT
3616 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3617 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3619 /* Sign-extend the result for arithmetic right shifts. */
3620 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3621 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3622 break;
3624 case ROTATERT:
3625 if (arg1 < 0)
3626 return 0;
3628 arg1 %= width;
3629 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3630 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3631 break;
3633 case ROTATE:
3634 if (arg1 < 0)
3635 return 0;
3637 arg1 %= width;
3638 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3639 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3640 break;
3642 case COMPARE:
3643 /* Do nothing here. */
3644 return 0;
3646 case SMIN:
3647 val = arg0s <= arg1s ? arg0s : arg1s;
3648 break;
3650 case UMIN:
3651 val = ((unsigned HOST_WIDE_INT) arg0
3652 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3653 break;
3655 case SMAX:
3656 val = arg0s > arg1s ? arg0s : arg1s;
3657 break;
3659 case UMAX:
3660 val = ((unsigned HOST_WIDE_INT) arg0
3661 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3662 break;
3664 case SS_PLUS:
3665 case US_PLUS:
3666 case SS_MINUS:
3667 case US_MINUS:
3668 case SS_MULT:
3669 case US_MULT:
3670 case SS_DIV:
3671 case US_DIV:
3672 case SS_ASHIFT:
3673 case US_ASHIFT:
3674 /* ??? There are simplifications that can be done. */
3675 return 0;
3677 default:
3678 gcc_unreachable ();
3681 return gen_int_mode (val, mode);
3684 return NULL_RTX;
3689 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3690 PLUS or MINUS.
3692 Rather than test for specific cases, we do this by a brute-force method
3693 and do all possible simplifications until no more changes occur. Then
3694 we rebuild the operation. */
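/* For example, (a + 5) - (a + 2) is expanded into the operand list
{a, 5, -a, -2}; the pairwise pass cancels a against -a and folds
5 with -2, leaving the constant 3.  */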
3696 struct simplify_plus_minus_op_data
3698 rtx op;
3699 short neg;
3702 static bool
3703 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3705 int result;
3707 result = (commutative_operand_precedence (y)
3708 - commutative_operand_precedence (x));
3709 if (result)
3710 return result > 0;
3712 /* Group together equal REGs to do more simplification. */
3713 if (REG_P (x) && REG_P (y))
3714 return REGNO (x) > REGNO (y);
3715 else
3716 return false;
3719 static rtx
3720 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3721 rtx op1)
3723 struct simplify_plus_minus_op_data ops[8];
3724 rtx result, tem;
3725 int n_ops = 2, input_ops = 2;
3726 int changed, n_constants = 0, canonicalized = 0;
3727 int i, j;
3729 memset (ops, 0, sizeof ops);
3731 /* Set up the two operands and then expand them until nothing has been
3732 changed. If we run out of room in our array, give up; this should
3733 almost never happen. */
3735 ops[0].op = op0;
3736 ops[0].neg = 0;
3737 ops[1].op = op1;
3738 ops[1].neg = (code == MINUS);
3742 changed = 0;
3744 for (i = 0; i < n_ops; i++)
3746 rtx this_op = ops[i].op;
3747 int this_neg = ops[i].neg;
3748 enum rtx_code this_code = GET_CODE (this_op);
3750 switch (this_code)
3752 case PLUS:
3753 case MINUS:
3754 if (n_ops == 7)
3755 return NULL_RTX;
3757 ops[n_ops].op = XEXP (this_op, 1);
3758 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3759 n_ops++;
3761 ops[i].op = XEXP (this_op, 0);
3762 input_ops++;
3763 changed = 1;
3764 canonicalized |= this_neg;
3765 break;
3767 case NEG:
3768 ops[i].op = XEXP (this_op, 0);
3769 ops[i].neg = ! this_neg;
3770 changed = 1;
3771 canonicalized = 1;
3772 break;
3774 case CONST:
3775 if (n_ops < 7
3776 && GET_CODE (XEXP (this_op, 0)) == PLUS
3777 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3778 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3780 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3781 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3782 ops[n_ops].neg = this_neg;
3783 n_ops++;
3784 changed = 1;
3785 canonicalized = 1;
3787 break;
3789 case NOT:
3790 /* ~a -> (-a - 1) */
3791 if (n_ops != 7)
3793 ops[n_ops].op = constm1_rtx;
3794 ops[n_ops++].neg = this_neg;
3795 ops[i].op = XEXP (this_op, 0);
3796 ops[i].neg = !this_neg;
3797 changed = 1;
3798 canonicalized = 1;
3800 break;
3802 case CONST_INT:
3803 n_constants++;
3804 if (this_neg)
3806 ops[i].op = neg_const_int (mode, this_op);
3807 ops[i].neg = 0;
3808 changed = 1;
3809 canonicalized = 1;
3811 break;
3813 default:
3814 break;
3818 while (changed);
3820 if (n_constants > 1)
3821 canonicalized = 1;
3823 gcc_assert (n_ops >= 2);
3825 /* If we only have two operands, we can avoid the loops. */
3826 if (n_ops == 2)
3828 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3829 rtx lhs, rhs;
3831 /* Get the two operands. Be careful with the order, especially for
3832 the cases where code == MINUS. */
3833 if (ops[0].neg && ops[1].neg)
3835 lhs = gen_rtx_NEG (mode, ops[0].op);
3836 rhs = ops[1].op;
3838 else if (ops[0].neg)
3840 lhs = ops[1].op;
3841 rhs = ops[0].op;
3843 else
3845 lhs = ops[0].op;
3846 rhs = ops[1].op;
3849 return simplify_const_binary_operation (code, mode, lhs, rhs);
3852 /* Now simplify each pair of operands until nothing changes. */
3855 /* Insertion sort is good enough for an eight-element array. */
3856 for (i = 1; i < n_ops; i++)
3858 struct simplify_plus_minus_op_data save;
3859 j = i - 1;
3860 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3861 continue;
3863 canonicalized = 1;
3864 save = ops[i];
3866 ops[j + 1] = ops[j];
3867 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3868 ops[j + 1] = save;
3871 changed = 0;
3872 for (i = n_ops - 1; i > 0; i--)
3873 for (j = i - 1; j >= 0; j--)
3875 rtx lhs = ops[j].op, rhs = ops[i].op;
3876 int lneg = ops[j].neg, rneg = ops[i].neg;
3878 if (lhs != 0 && rhs != 0)
3880 enum rtx_code ncode = PLUS;
3882 if (lneg != rneg)
3884 ncode = MINUS;
3885 if (lneg)
3886 tem = lhs, lhs = rhs, rhs = tem;
3888 else if (swap_commutative_operands_p (lhs, rhs))
3889 tem = lhs, lhs = rhs, rhs = tem;
3891 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3892 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3894 rtx tem_lhs, tem_rhs;
3896 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3897 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3898 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3900 if (tem && !CONSTANT_P (tem))
3901 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3903 else
3904 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3906 /* Reject "simplifications" that just wrap the two
3907 arguments in a CONST. Failure to do so can result
3908 in infinite recursion with simplify_binary_operation
3909 when it calls us to simplify CONST operations. */
3910 if (tem
3911 && ! (GET_CODE (tem) == CONST
3912 && GET_CODE (XEXP (tem, 0)) == ncode
3913 && XEXP (XEXP (tem, 0), 0) == lhs
3914 && XEXP (XEXP (tem, 0), 1) == rhs))
3916 lneg &= rneg;
3917 if (GET_CODE (tem) == NEG)
3918 tem = XEXP (tem, 0), lneg = !lneg;
3919 if (CONST_INT_P (tem) && lneg)
3920 tem = neg_const_int (mode, tem), lneg = 0;
3922 ops[i].op = tem;
3923 ops[i].neg = lneg;
3924 ops[j].op = NULL_RTX;
3925 changed = 1;
3926 canonicalized = 1;
3931 /* If nothing changed, fail. */
3932 if (!canonicalized)
3933 return NULL_RTX;
3935 /* Pack all the operands to the lower-numbered entries. */
3936 for (i = 0, j = 0; j < n_ops; j++)
3937 if (ops[j].op)
3939 ops[i] = ops[j];
3940 i++;
3942 n_ops = i;
3944 while (changed);
3946 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3947 if (n_ops == 2
3948 && CONST_INT_P (ops[1].op)
3949 && CONSTANT_P (ops[0].op)
3950 && ops[0].neg)
3951 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3953 /* We suppressed creation of trivial CONST expressions in the
3954 combination loop to avoid recursion. Create one manually now.
3955 The combination loop should have ensured that there is exactly
3956 one CONST_INT, and the sort will have ensured that it is last
3957 in the array and that any other constant will be next-to-last. */
3959 if (n_ops > 1
3960 && CONST_INT_P (ops[n_ops - 1].op)
3961 && CONSTANT_P (ops[n_ops - 2].op))
3963 rtx value = ops[n_ops - 1].op;
3964 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3965 value = neg_const_int (mode, value);
3966 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3967 n_ops--;
3970 /* Put a non-negated operand first, if possible. */
3972 for (i = 0; i < n_ops && ops[i].neg; i++)
3973 continue;
3974 if (i == n_ops)
3975 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3976 else if (i != 0)
3978 tem = ops[0].op;
3979 ops[0] = ops[i];
3980 ops[i].op = tem;
3981 ops[i].neg = 1;
3984 /* Now make the result by performing the requested operations. */
3985 result = ops[0].op;
3986 for (i = 1; i < n_ops; i++)
3987 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3988 mode, result, ops[i].op);
3990 return result;
3993 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3994 static bool
3995 plus_minus_operand_p (const_rtx x)
3997 return GET_CODE (x) == PLUS
3998 || GET_CODE (x) == MINUS
3999 || (GET_CODE (x) == CONST
4000 && GET_CODE (XEXP (x, 0)) == PLUS
4001 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4002 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4005 /* Like simplify_binary_operation except used for relational operators.
4006 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4007 not both be VOIDmode.
4009 CMP_MODE specifies the mode in which the comparison is done, so it is
4010 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4011 the operands or, if both are VOIDmode, the operands are compared in
4012 "infinite precision". */
4014 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4015 enum machine_mode cmp_mode, rtx op0, rtx op1)
4017 rtx tem, trueop0, trueop1;
4019 if (cmp_mode == VOIDmode)
4020 cmp_mode = GET_MODE (op0);
4021 if (cmp_mode == VOIDmode)
4022 cmp_mode = GET_MODE (op1);
4024 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4025 if (tem)
4027 if (SCALAR_FLOAT_MODE_P (mode))
4029 if (tem == const0_rtx)
4030 return CONST0_RTX (mode);
4031 #ifdef FLOAT_STORE_FLAG_VALUE
4033 REAL_VALUE_TYPE val;
4034 val = FLOAT_STORE_FLAG_VALUE (mode);
4035 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4037 #else
4038 return NULL_RTX;
4039 #endif
4041 if (VECTOR_MODE_P (mode))
4043 if (tem == const0_rtx)
4044 return CONST0_RTX (mode);
4045 #ifdef VECTOR_STORE_FLAG_VALUE
4047 int i, units;
4048 rtvec v;
4050 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4051 if (val == NULL_RTX)
4052 return NULL_RTX;
4053 if (val == const1_rtx)
4054 return CONST1_RTX (mode);
4056 units = GET_MODE_NUNITS (mode);
4057 v = rtvec_alloc (units);
4058 for (i = 0; i < units; i++)
4059 RTVEC_ELT (v, i) = val;
4060 return gen_rtx_raw_CONST_VECTOR (mode, v);
4062 #else
4063 return NULL_RTX;
4064 #endif
4067 return tem;
4070 /* For the following tests, ensure const0_rtx is op1. */
4071 if (swap_commutative_operands_p (op0, op1)
4072 || (op0 == const0_rtx && op1 != const0_rtx))
4073 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4075 /* If op0 is a compare, extract the comparison arguments from it. */
4076 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4077 return simplify_gen_relational (code, mode, VOIDmode,
4078 XEXP (op0, 0), XEXP (op0, 1));
4080 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4081 || CC0_P (op0))
4082 return NULL_RTX;
4084 trueop0 = avoid_constant_pool_reference (op0);
4085 trueop1 = avoid_constant_pool_reference (op1);
4086 return simplify_relational_operation_1 (code, mode, cmp_mode,
4087 trueop0, trueop1);
4090 /* This part of simplify_relational_operation is only used when CMP_MODE
4091 is not in class MODE_CC (i.e. it is a real comparison).
4093 MODE is the mode of the result, while CMP_MODE specifies the mode
4094 in which the comparison is done, so it is the mode of the operands. */
4096 static rtx
4097 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4098 enum machine_mode cmp_mode, rtx op0, rtx op1)
4100 enum rtx_code op0code = GET_CODE (op0);
4102 if (op1 == const0_rtx && COMPARISON_P (op0))
4104 /* If op0 is a comparison, extract the comparison arguments
4105 from it. */
4106 if (code == NE)
4108 if (GET_MODE (op0) == mode)
4109 return simplify_rtx (op0);
4110 else
4111 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4112 XEXP (op0, 0), XEXP (op0, 1));
4114 else if (code == EQ)
4116 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4117 if (new_code != UNKNOWN)
4118 return simplify_gen_relational (new_code, mode, VOIDmode,
4119 XEXP (op0, 0), XEXP (op0, 1));
4123 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4124 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4125 if ((code == LTU || code == GEU)
4126 && GET_CODE (op0) == PLUS
4127 && CONST_INT_P (XEXP (op0, 1))
4128 && (rtx_equal_p (op1, XEXP (op0, 0))
4129 || rtx_equal_p (op1, XEXP (op0, 1))))
4131 rtx new_cmp
4132 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4133 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4134 cmp_mode, XEXP (op0, 0), new_cmp);
4137 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4138 if ((code == LTU || code == GEU)
4139 && GET_CODE (op0) == PLUS
4140 && rtx_equal_p (op1, XEXP (op0, 1))
4141 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4142 && !rtx_equal_p (op1, XEXP (op0, 0)))
4143 return simplify_gen_relational (code, mode, cmp_mode, op0,
4144 copy_rtx (XEXP (op0, 0)));
4146 if (op1 == const0_rtx)
4148 /* Canonicalize (GTU x 0) as (NE x 0). */
4149 if (code == GTU)
4150 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4151 /* Canonicalize (LEU x 0) as (EQ x 0). */
4152 if (code == LEU)
4153 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4155 else if (op1 == const1_rtx)
4157 switch (code)
4159 case GE:
4160 /* Canonicalize (GE x 1) as (GT x 0). */
4161 return simplify_gen_relational (GT, mode, cmp_mode,
4162 op0, const0_rtx);
4163 case GEU:
4164 /* Canonicalize (GEU x 1) as (NE x 0). */
4165 return simplify_gen_relational (NE, mode, cmp_mode,
4166 op0, const0_rtx);
4167 case LT:
4168 /* Canonicalize (LT x 1) as (LE x 0). */
4169 return simplify_gen_relational (LE, mode, cmp_mode,
4170 op0, const0_rtx);
4171 case LTU:
4172 /* Canonicalize (LTU x 1) as (EQ x 0). */
4173 return simplify_gen_relational (EQ, mode, cmp_mode,
4174 op0, const0_rtx);
4175 default:
4176 break;
4179 else if (op1 == constm1_rtx)
4181 /* Canonicalize (LE x -1) as (LT x 0). */
4182 if (code == LE)
4183 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4184 /* Canonicalize (GT x -1) as (GE x 0). */
4185 if (code == GT)
4186 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4189 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4190 if ((code == EQ || code == NE)
4191 && (op0code == PLUS || op0code == MINUS)
4192 && CONSTANT_P (op1)
4193 && CONSTANT_P (XEXP (op0, 1))
4194 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4196 rtx x = XEXP (op0, 0);
4197 rtx c = XEXP (op0, 1);
4199 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4200 cmp_mode, op1, c);
4201 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4204 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4205 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4206 if (code == NE
4207 && op1 == const0_rtx
4208 && GET_MODE_CLASS (mode) == MODE_INT
4209 && cmp_mode != VOIDmode
4210 /* ??? Work-around BImode bugs in the ia64 backend. */
4211 && mode != BImode
4212 && cmp_mode != BImode
4213 && nonzero_bits (op0, cmp_mode) == 1
4214 && STORE_FLAG_VALUE == 1)
4215 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4216 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4217 : lowpart_subreg (mode, op0, cmp_mode);
4219 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4220 if ((code == EQ || code == NE)
4221 && op1 == const0_rtx
4222 && op0code == XOR)
4223 return simplify_gen_relational (code, mode, cmp_mode,
4224 XEXP (op0, 0), XEXP (op0, 1));
4226 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4227 if ((code == EQ || code == NE)
4228 && op0code == XOR
4229 && rtx_equal_p (XEXP (op0, 0), op1)
4230 && !side_effects_p (XEXP (op0, 0)))
4231 return simplify_gen_relational (code, mode, cmp_mode,
4232 XEXP (op0, 1), const0_rtx);
4234 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4235 if ((code == EQ || code == NE)
4236 && op0code == XOR
4237 && rtx_equal_p (XEXP (op0, 1), op1)
4238 && !side_effects_p (XEXP (op0, 1)))
4239 return simplify_gen_relational (code, mode, cmp_mode,
4240 XEXP (op0, 0), const0_rtx);
4242 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4243 if ((code == EQ || code == NE)
4244 && op0code == XOR
4245 && (CONST_INT_P (op1)
4246 || GET_CODE (op1) == CONST_DOUBLE)
4247 && (CONST_INT_P (XEXP (op0, 1))
4248 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4249 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4250 simplify_gen_binary (XOR, cmp_mode,
4251 XEXP (op0, 1), op1));
4253 if (op0code == POPCOUNT && op1 == const0_rtx)
4254 switch (code)
4256 case EQ:
4257 case LE:
4258 case LEU:
4259 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4260 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4261 XEXP (op0, 0), const0_rtx);
4263 case NE:
4264 case GT:
4265 case GTU:
4266 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4267 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4268 XEXP (op0, 0), const0_rtx);
4270 default:
4271 break;
4274 return NULL_RTX;
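/* Flags recording what is known about the ordering of the two operands of
a comparison: equality plus the signed and unsigned orderings. Used by
comparison_result below. */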
4277 enum
4279 CMP_EQ = 1,
4280 CMP_LT = 2,
4281 CMP_GT = 4,
4282 CMP_LTU = 8,
4283 CMP_GTU = 16
4287 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4288 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4289 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4290 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4291 For floating-point comparisons, assume that the operands were ordered. */
4293 static rtx
4294 comparison_result (enum rtx_code code, int known_results)
4296 switch (code)
4298 case EQ:
4299 case UNEQ:
4300 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4301 case NE:
4302 case LTGT:
4303 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4305 case LT:
4306 case UNLT:
4307 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4308 case GE:
4309 case UNGE:
4310 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4312 case GT:
4313 case UNGT:
4314 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4315 case LE:
4316 case UNLE:
4317 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4319 case LTU:
4320 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4321 case GEU:
4322 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4324 case GTU:
4325 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4326 case LEU:
4327 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4329 case ORDERED:
4330 return const_true_rtx;
4331 case UNORDERED:
4332 return const0_rtx;
4333 default:
4334 gcc_unreachable ();
4338 /* Check if the given comparison (done in the given MODE) is actually a
4339 tautology or a contradiction.
4340 If no simplification is possible, this function returns zero.
4341 Otherwise, it returns either const_true_rtx or const0_rtx. */
4344 simplify_const_relational_operation (enum rtx_code code,
4345 enum machine_mode mode,
4346 rtx op0, rtx op1)
4348 rtx tem;
4349 rtx trueop0;
4350 rtx trueop1;
4352 gcc_assert (mode != VOIDmode
4353 || (GET_MODE (op0) == VOIDmode
4354 && GET_MODE (op1) == VOIDmode));
4356 /* If op0 is a compare, extract the comparison arguments from it. */
4357 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4359 op1 = XEXP (op0, 1);
4360 op0 = XEXP (op0, 0);
4362 if (GET_MODE (op0) != VOIDmode)
4363 mode = GET_MODE (op0);
4364 else if (GET_MODE (op1) != VOIDmode)
4365 mode = GET_MODE (op1);
4366 else
4367 return 0;
4370 /* We can't simplify MODE_CC values since we don't know what the
4371 actual comparison is. */
4372 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4373 return 0;
4375 /* Make sure the constant is second. */
4376 if (swap_commutative_operands_p (op0, op1))
4378 tem = op0, op0 = op1, op1 = tem;
4379 code = swap_condition (code);
4382 trueop0 = avoid_constant_pool_reference (op0);
4383 trueop1 = avoid_constant_pool_reference (op1);
4385 /* For integer comparisons of A and B maybe we can simplify A - B and can
4386 then simplify a comparison of that with zero. If A and B are both either
4387 a register or a CONST_INT, this can't help; testing for these cases will
4388 prevent infinite recursion here and speed things up.
4390 We can only do this for EQ and NE comparisons; otherwise we may
4391 lose or introduce overflow, which we cannot disregard as undefined
4392 because we do not know the signedness of the operation on either the
4393 left or the right hand side of the comparison. */
4395 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4396 && (code == EQ || code == NE)
4397 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4398 && (REG_P (op1) || CONST_INT_P (trueop1)))
4399 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4400 /* We cannot do this if tem is a nonzero address. */
4401 && ! nonzero_address_p (tem))
4402 return simplify_const_relational_operation (signed_condition (code),
4403 mode, tem, const0_rtx);
4405 if (! HONOR_NANS (mode) && code == ORDERED)
4406 return const_true_rtx;
4408 if (! HONOR_NANS (mode) && code == UNORDERED)
4409 return const0_rtx;
4411 /* For modes without NaNs, if the two operands are equal, we know the
4412 result except if they have side-effects. Even with NaNs we know
4413 the result of unordered comparisons and, if signaling NaNs are
4414 irrelevant, also the result of LT/GT/LTGT. */
4415 if ((! HONOR_NANS (GET_MODE (trueop0))
4416 || code == UNEQ || code == UNLE || code == UNGE
4417 || ((code == LT || code == GT || code == LTGT)
4418 && ! HONOR_SNANS (GET_MODE (trueop0))))
4419 && rtx_equal_p (trueop0, trueop1)
4420 && ! side_effects_p (trueop0))
4421 return comparison_result (code, CMP_EQ);
4423 /* If the operands are floating-point constants, see if we can fold
4424 the result. */
4425 if (GET_CODE (trueop0) == CONST_DOUBLE
4426 && GET_CODE (trueop1) == CONST_DOUBLE
4427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4429 REAL_VALUE_TYPE d0, d1;
4431 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4432 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4434 /* Comparisons are unordered iff at least one of the values is NaN. */
4435 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4436 switch (code)
4438 case UNEQ:
4439 case UNLT:
4440 case UNGT:
4441 case UNLE:
4442 case UNGE:
4443 case NE:
4444 case UNORDERED:
4445 return const_true_rtx;
4446 case EQ:
4447 case LT:
4448 case GT:
4449 case LE:
4450 case GE:
4451 case LTGT:
4452 case ORDERED:
4453 return const0_rtx;
4454 default:
4455 return 0;
4458 return comparison_result (code,
4459 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4460 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4463 /* Otherwise, see if the operands are both integers. */
4464 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4465 && (GET_CODE (trueop0) == CONST_DOUBLE
4466 || CONST_INT_P (trueop0))
4467 && (GET_CODE (trueop1) == CONST_DOUBLE
4468 || CONST_INT_P (trueop1)))
4470 int width = GET_MODE_BITSIZE (mode);
4471 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4472 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4474 /* Get the two words comprising each integer constant. */
4475 if (GET_CODE (trueop0) == CONST_DOUBLE)
4477 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4478 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4480 else
4482 l0u = l0s = INTVAL (trueop0);
4483 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4486 if (GET_CODE (trueop1) == CONST_DOUBLE)
4488 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4489 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4491 else
4493 l1u = l1s = INTVAL (trueop1);
4494 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4497 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4498 we have to sign or zero-extend the values. */
4499 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4501 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4502 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4504 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4505 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4507 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4508 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4510 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4511 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4513 if (h0u == h1u && l0u == l1u)
4514 return comparison_result (code, CMP_EQ);
4515 else
4517 int cr;
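/* Work out both the signed and the unsigned ordering of the two double-word
values; comparison_result then picks whichever one CODE needs. */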
4518 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4519 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4520 return comparison_result (code, cr);
4524 /* Optimize comparisons with upper and lower bounds. */
4525 if (SCALAR_INT_MODE_P (mode)
4526 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4527 && CONST_INT_P (trueop1))
4529 int sign;
4530 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4531 HOST_WIDE_INT val = INTVAL (trueop1);
4532 HOST_WIDE_INT mmin, mmax;
4534 if (code == GEU
4535 || code == LEU
4536 || code == GTU
4537 || code == LTU)
4538 sign = 0;
4539 else
4540 sign = 1;
4542 /* Get a reduced range if the sign bit is zero. */
4543 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4545 mmin = 0;
4546 mmax = nonzero;
4548 else
4550 rtx mmin_rtx, mmax_rtx;
4551 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4553 mmin = INTVAL (mmin_rtx);
4554 mmax = INTVAL (mmax_rtx);
4555 if (sign)
4557 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4559 mmin >>= (sign_copies - 1);
4560 mmax >>= (sign_copies - 1);
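/* With TRUEOP0 known to lie in [MMIN, MMAX], many comparisons against VAL
can be decided outright; e.g. (eq x 300) is false when nonzero_bits shows
that x fits in 8 bits. */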
4564 switch (code)
4566 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4567 case GEU:
4568 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4569 return const_true_rtx;
4570 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4571 return const0_rtx;
4572 break;
4573 case GE:
4574 if (val <= mmin)
4575 return const_true_rtx;
4576 if (val > mmax)
4577 return const0_rtx;
4578 break;
4580 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4581 case LEU:
4582 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4583 return const_true_rtx;
4584 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4585 return const0_rtx;
4586 break;
4587 case LE:
4588 if (val >= mmax)
4589 return const_true_rtx;
4590 if (val < mmin)
4591 return const0_rtx;
4592 break;
4594 case EQ:
4595 /* x == y is always false for y out of range. */
4596 if (val < mmin || val > mmax)
4597 return const0_rtx;
4598 break;
4600 /* x > y is always false for y >= mmax, always true for y < mmin. */
4601 case GTU:
4602 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4603 return const0_rtx;
4604 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4605 return const_true_rtx;
4606 break;
4607 case GT:
4608 if (val >= mmax)
4609 return const0_rtx;
4610 if (val < mmin)
4611 return const_true_rtx;
4612 break;
4614 /* x < y is always false for y <= mmin, always true for y > mmax. */
4615 case LTU:
4616 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4617 return const0_rtx;
4618 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4619 return const_true_rtx;
4620 break;
4621 case LT:
4622 if (val <= mmin)
4623 return const0_rtx;
4624 if (val > mmax)
4625 return const_true_rtx;
4626 break;
4628 case NE:
4629 /* x != y is always true for y out of range. */
4630 if (val < mmin || val > mmax)
4631 return const_true_rtx;
4632 break;
4634 default:
4635 break;
4639 /* Optimize integer comparisons with zero. */
4640 if (trueop1 == const0_rtx)
4642 /* Some addresses are known to be nonzero. We don't know
4643 their sign, but equality comparisons are known. */
4644 if (nonzero_address_p (trueop0))
4646 if (code == EQ || code == LEU)
4647 return const0_rtx;
4648 if (code == NE || code == GTU)
4649 return const_true_rtx;
4652 /* See if the first operand is an IOR with a constant. If so, we
4653 may be able to determine the result of this comparison. */
4654 if (GET_CODE (op0) == IOR)
4656 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4657 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4659 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4660 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4661 && (UINTVAL (inner_const)
4662 & ((unsigned HOST_WIDE_INT) 1
4663 << sign_bitnum)));
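/* A nonzero constant ORed into OP0 guarantees that OP0 is nonzero; if that
constant also sets the sign bit, OP0 is negative when interpreted as a
signed value. */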
4665 switch (code)
4667 case EQ:
4668 case LEU:
4669 return const0_rtx;
4670 case NE:
4671 case GTU:
4672 return const_true_rtx;
4673 case LT:
4674 case LE:
4675 if (has_sign)
4676 return const_true_rtx;
4677 break;
4678 case GT:
4679 case GE:
4680 if (has_sign)
4681 return const0_rtx;
4682 break;
4683 default:
4684 break;
4690 /* Optimize comparison of ABS with zero. */
4691 if (trueop1 == CONST0_RTX (mode)
4692 && (GET_CODE (trueop0) == ABS
4693 || (GET_CODE (trueop0) == FLOAT_EXTEND
4694 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4696 switch (code)
4698 case LT:
4699 /* Optimize abs(x) < 0.0. */
4700 if (!HONOR_SNANS (mode)
4701 && (!INTEGRAL_MODE_P (mode)
4702 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4704 if (INTEGRAL_MODE_P (mode)
4705 && (issue_strict_overflow_warning
4706 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4707 warning (OPT_Wstrict_overflow,
4708 ("assuming signed overflow does not occur when "
4709 "assuming abs (x) < 0 is false"));
4710 return const0_rtx;
4712 break;
4714 case GE:
4715 /* Optimize abs(x) >= 0.0. */
4716 if (!HONOR_NANS (mode)
4717 && (!INTEGRAL_MODE_P (mode)
4718 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4720 if (INTEGRAL_MODE_P (mode)
4721 && (issue_strict_overflow_warning
4722 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4723 warning (OPT_Wstrict_overflow,
4724 ("assuming signed overflow does not occur when "
4725 "assuming abs (x) >= 0 is true"));
4726 return const_true_rtx;
4728 break;
4730 case UNGE:
4731 /* Optimize ! (abs(x) < 0.0). */
4732 return const_true_rtx;
4734 default:
4735 break;
4739 return 0;
4742 /* Simplify CODE, an operation with result mode MODE and three operands,
4743 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4744 a constant. Return 0 if no simplification is possible. */
4747 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4748 enum machine_mode op0_mode, rtx op0, rtx op1,
4749 rtx op2)
4751 unsigned int width = GET_MODE_BITSIZE (mode);
4752 bool any_change = false;
4753 rtx tem;
4755 /* VOIDmode means "infinite" precision. */
4756 if (width == 0)
4757 width = HOST_BITS_PER_WIDE_INT;
4759 switch (code)
4761 case FMA:
4762 /* Simplify negations around the multiplication. */
4763 /* -a * -b + c => a * b + c. */
4764 if (GET_CODE (op0) == NEG)
4766 tem = simplify_unary_operation (NEG, mode, op1, mode);
4767 if (tem)
4768 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4770 else if (GET_CODE (op1) == NEG)
4772 tem = simplify_unary_operation (NEG, mode, op0, mode);
4773 if (tem)
4774 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4777 /* Canonicalize the two multiplication operands. */
4778 /* a * -b + c => -b * a + c. */
4779 if (swap_commutative_operands_p (op0, op1))
4780 tem = op0, op0 = op1, op1 = tem, any_change = true;
4782 if (any_change)
4783 return gen_rtx_FMA (mode, op0, op1, op2);
4784 return NULL_RTX;
4786 case SIGN_EXTRACT:
4787 case ZERO_EXTRACT:
4788 if (CONST_INT_P (op0)
4789 && CONST_INT_P (op1)
4790 && CONST_INT_P (op2)
4791 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4792 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4794 /* Extracting a bit-field from a constant */
4795 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4797 if (BITS_BIG_ENDIAN)
4798 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4799 else
4800 val >>= INTVAL (op2);
4802 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4804 /* First zero-extend. */
4805 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4806 /* If desired, propagate sign bit. */
4807 if (code == SIGN_EXTRACT
4808 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4809 != 0)
4810 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4813 /* Clear the bits that don't belong in our mode,
4814 unless they and our sign bit are all one.
4815 So we get either a reasonable negative value or a reasonable
4816 unsigned value for this mode. */
4817 if (width < HOST_BITS_PER_WIDE_INT
4818 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4819 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4820 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4822 return gen_int_mode (val, mode);
4824 break;
4826 case IF_THEN_ELSE:
4827 if (CONST_INT_P (op0))
4828 return op0 != const0_rtx ? op1 : op2;
4830 /* Convert c ? a : a into "a". */
4831 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4832 return op1;
4834 /* Convert a != b ? a : b into "a". */
4835 if (GET_CODE (op0) == NE
4836 && ! side_effects_p (op0)
4837 && ! HONOR_NANS (mode)
4838 && ! HONOR_SIGNED_ZEROS (mode)
4839 && ((rtx_equal_p (XEXP (op0, 0), op1)
4840 && rtx_equal_p (XEXP (op0, 1), op2))
4841 || (rtx_equal_p (XEXP (op0, 0), op2)
4842 && rtx_equal_p (XEXP (op0, 1), op1))))
4843 return op1;
4845 /* Convert a == b ? a : b into "b". */
4846 if (GET_CODE (op0) == EQ
4847 && ! side_effects_p (op0)
4848 && ! HONOR_NANS (mode)
4849 && ! HONOR_SIGNED_ZEROS (mode)
4850 && ((rtx_equal_p (XEXP (op0, 0), op1)
4851 && rtx_equal_p (XEXP (op0, 1), op2))
4852 || (rtx_equal_p (XEXP (op0, 0), op2)
4853 && rtx_equal_p (XEXP (op0, 1), op1))))
4854 return op2;
4856 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4858 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4859 ? GET_MODE (XEXP (op0, 1))
4860 : GET_MODE (XEXP (op0, 0)));
4861 rtx temp;
4863 /* Look for happy constants in op1 and op2. */
4864 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4866 HOST_WIDE_INT t = INTVAL (op1);
4867 HOST_WIDE_INT f = INTVAL (op2);
4869 if (t == STORE_FLAG_VALUE && f == 0)
4870 code = GET_CODE (op0);
4871 else if (t == 0 && f == STORE_FLAG_VALUE)
4873 enum rtx_code tmp;
4874 tmp = reversed_comparison_code (op0, NULL_RTX);
4875 if (tmp == UNKNOWN)
4876 break;
4877 code = tmp;
4879 else
4880 break;
4882 return simplify_gen_relational (code, mode, cmp_mode,
4883 XEXP (op0, 0), XEXP (op0, 1));
4886 if (cmp_mode == VOIDmode)
4887 cmp_mode = op0_mode;
4888 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4889 cmp_mode, XEXP (op0, 0),
4890 XEXP (op0, 1));
4892 /* See if any simplifications were possible. */
4893 if (temp)
4895 if (CONST_INT_P (temp))
4896 return temp == const0_rtx ? op2 : op1;
4897 else
4898 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4901 break;
4903 case VEC_MERGE:
4904 gcc_assert (GET_MODE (op0) == mode);
4905 gcc_assert (GET_MODE (op1) == mode);
4906 gcc_assert (VECTOR_MODE_P (mode));
4907 op2 = avoid_constant_pool_reference (op2);
4908 if (CONST_INT_P (op2))
4910 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4911 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4912 int mask = (1 << n_elts) - 1;
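/* Bit I of the OP2 mask selects element I of the result from OP0; a clear
bit selects it from OP1. */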
4914 if (!(INTVAL (op2) & mask))
4915 return op1;
4916 if ((INTVAL (op2) & mask) == mask)
4917 return op0;
4919 op0 = avoid_constant_pool_reference (op0);
4920 op1 = avoid_constant_pool_reference (op1);
4921 if (GET_CODE (op0) == CONST_VECTOR
4922 && GET_CODE (op1) == CONST_VECTOR)
4924 rtvec v = rtvec_alloc (n_elts);
4925 unsigned int i;
4927 for (i = 0; i < n_elts; i++)
4928 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4929 ? CONST_VECTOR_ELT (op0, i)
4930 : CONST_VECTOR_ELT (op1, i));
4931 return gen_rtx_CONST_VECTOR (mode, v);
4934 break;
4936 default:
4937 gcc_unreachable ();
4940 return 0;
4943 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4944 or CONST_VECTOR,
4945 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4947 Works by unpacking OP into a collection of 8-bit values
4948 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4949 and then repacking them again for OUTERMODE. */
4951 static rtx
4952 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4953 enum machine_mode innermode, unsigned int byte)
4955 /* We support up to 512-bit values (for V8DFmode). */
4956 enum {
4957 max_bitsize = 512,
4958 value_bit = 8,
4959 value_mask = (1 << value_bit) - 1
4961 unsigned char value[max_bitsize / value_bit];
4962 int value_start;
4963 int i;
4964 int elem;
4966 int num_elem;
4967 rtx * elems;
4968 int elem_bitsize;
4969 rtx result_s;
4970 rtvec result_v = NULL;
4971 enum mode_class outer_class;
4972 enum machine_mode outer_submode;
4974 /* Some ports misuse CCmode. */
4975 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4976 return op;
4978 /* We have no way to represent a complex constant at the rtl level. */
4979 if (COMPLEX_MODE_P (outermode))
4980 return NULL_RTX;
4982 /* Unpack the value. */
4984 if (GET_CODE (op) == CONST_VECTOR)
4986 num_elem = CONST_VECTOR_NUNITS (op);
4987 elems = &CONST_VECTOR_ELT (op, 0);
4988 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4990 else
4992 num_elem = 1;
4993 elems = &op;
4994 elem_bitsize = max_bitsize;
4996 /* If this asserts, it is too complicated; reducing value_bit may help. */
4997 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4998 /* I don't know how to handle endianness of sub-units. */
4999 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5001 for (elem = 0; elem < num_elem; elem++)
5003 unsigned char * vp;
5004 rtx el = elems[elem];
5006 /* Vectors are kept in target memory order. (This is probably
5007 a mistake.) */
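/* Compute where this element's value chunks start within the little-endian
VALUE array, taking WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN into account. */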
5009 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5010 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5011 / BITS_PER_UNIT);
5012 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5013 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5014 unsigned bytele = (subword_byte % UNITS_PER_WORD
5015 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5016 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5019 switch (GET_CODE (el))
5021 case CONST_INT:
5022 for (i = 0;
5023 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5024 i += value_bit)
5025 *vp++ = INTVAL (el) >> i;
5026 /* CONST_INTs are always logically sign-extended. */
5027 for (; i < elem_bitsize; i += value_bit)
5028 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5029 break;
5031 case CONST_DOUBLE:
5032 if (GET_MODE (el) == VOIDmode)
5034 /* If this triggers, someone should have generated a
5035 CONST_INT instead. */
5036 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5038 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5039 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5040 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5042 *vp++
5043 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5044 i += value_bit;
5046 /* It shouldn't matter what's done here, so fill it with
5047 zero. */
5048 for (; i < elem_bitsize; i += value_bit)
5049 *vp++ = 0;
5051 else
5053 long tmp[max_bitsize / 32];
5054 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5056 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5057 gcc_assert (bitsize <= elem_bitsize);
5058 gcc_assert (bitsize % value_bit == 0);
5060 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5061 GET_MODE (el));
5063 /* real_to_target produces its result in words affected by
5064 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5065 and use WORDS_BIG_ENDIAN instead; see the documentation
5066 of SUBREG in rtl.texi. */
5067 for (i = 0; i < bitsize; i += value_bit)
5069 int ibase;
5070 if (WORDS_BIG_ENDIAN)
5071 ibase = bitsize - 1 - i;
5072 else
5073 ibase = i;
5074 *vp++ = tmp[ibase / 32] >> i % 32;
5077 /* It shouldn't matter what's done here, so fill it with
5078 zero. */
5079 for (; i < elem_bitsize; i += value_bit)
5080 *vp++ = 0;
5082 break;
5084 case CONST_FIXED:
5085 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5087 for (i = 0; i < elem_bitsize; i += value_bit)
5088 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5090 else
5092 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5093 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5094 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5095 i += value_bit)
5096 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5097 >> (i - HOST_BITS_PER_WIDE_INT);
5098 for (; i < elem_bitsize; i += value_bit)
5099 *vp++ = 0;
5101 break;
5103 default:
5104 gcc_unreachable ();
5108 /* Now, pick the right byte to start with. */
5109 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5110 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5111 will already have offset 0. */
5112 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5114 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5115 - byte);
5116 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5117 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5118 byte = (subword_byte % UNITS_PER_WORD
5119 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5122 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5123 so if it's become negative it will instead be very large.) */
5124 gcc_assert (byte < GET_MODE_SIZE (innermode));
5126 /* Convert from bytes to chunks of size value_bit. */
5127 value_start = byte * (BITS_PER_UNIT / value_bit);
5129 /* Re-pack the value. */
5131 if (VECTOR_MODE_P (outermode))
5133 num_elem = GET_MODE_NUNITS (outermode);
5134 result_v = rtvec_alloc (num_elem);
5135 elems = &RTVEC_ELT (result_v, 0);
5136 outer_submode = GET_MODE_INNER (outermode);
5138 else
5140 num_elem = 1;
5141 elems = &result_s;
5142 outer_submode = outermode;
5145 outer_class = GET_MODE_CLASS (outer_submode);
5146 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5148 gcc_assert (elem_bitsize % value_bit == 0);
5149 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5151 for (elem = 0; elem < num_elem; elem++)
5153 unsigned char *vp;
5155 /* Vectors are stored in target memory order. (This is probably
5156 a mistake.) */
5158 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5159 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5160 / BITS_PER_UNIT);
5161 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5162 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5163 unsigned bytele = (subword_byte % UNITS_PER_WORD
5164 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5165 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5168 switch (outer_class)
5170 case MODE_INT:
5171 case MODE_PARTIAL_INT:
5173 unsigned HOST_WIDE_INT hi = 0, lo = 0;
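/* Reassemble the low and high words of the integer from the little-endian
8-bit value chunks. */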
5175 for (i = 0;
5176 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5177 i += value_bit)
5178 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5179 for (; i < elem_bitsize; i += value_bit)
5180 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5181 << (i - HOST_BITS_PER_WIDE_INT);
5183 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5184 know why. */
5185 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5186 elems[elem] = gen_int_mode (lo, outer_submode);
5187 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5188 elems[elem] = immed_double_const (lo, hi, outer_submode);
5189 else
5190 return NULL_RTX;
5192 break;
5194 case MODE_FLOAT:
5195 case MODE_DECIMAL_FLOAT:
5197 REAL_VALUE_TYPE r;
5198 long tmp[max_bitsize / 32];
5200 /* real_from_target wants its input in words affected by
5201 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5202 and use WORDS_BIG_ENDIAN instead; see the documentation
5203 of SUBREG in rtl.texi. */
5204 for (i = 0; i < max_bitsize / 32; i++)
5205 tmp[i] = 0;
5206 for (i = 0; i < elem_bitsize; i += value_bit)
5208 int ibase;
5209 if (WORDS_BIG_ENDIAN)
5210 ibase = elem_bitsize - 1 - i;
5211 else
5212 ibase = i;
5213 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5216 real_from_target (&r, tmp, outer_submode);
5217 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5219 break;
5221 case MODE_FRACT:
5222 case MODE_UFRACT:
5223 case MODE_ACCUM:
5224 case MODE_UACCUM:
5226 FIXED_VALUE_TYPE f;
5227 f.data.low = 0;
5228 f.data.high = 0;
5229 f.mode = outer_submode;
5231 for (i = 0;
5232 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5233 i += value_bit)
5234 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5235 for (; i < elem_bitsize; i += value_bit)
5236 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5237 << (i - HOST_BITS_PER_WIDE_INT));
5239 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5241 break;
5243 default:
5244 gcc_unreachable ();
5247 if (VECTOR_MODE_P (outermode))
5248 return gen_rtx_CONST_VECTOR (outermode, result_v);
5249 else
5250 return result_s;
5253 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5254 Return 0 if no simplifications are possible. */
5256 simplify_subreg (enum machine_mode outermode, rtx op,
5257 enum machine_mode innermode, unsigned int byte)
5259 /* Little bit of sanity checking. */
5260 gcc_assert (innermode != VOIDmode);
5261 gcc_assert (outermode != VOIDmode);
5262 gcc_assert (innermode != BLKmode);
5263 gcc_assert (outermode != BLKmode);
5265 gcc_assert (GET_MODE (op) == innermode
5266 || GET_MODE (op) == VOIDmode);
5268 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5269 gcc_assert (byte < GET_MODE_SIZE (innermode));
5271 if (outermode == innermode && !byte)
5272 return op;
5274 if (CONST_INT_P (op)
5275 || GET_CODE (op) == CONST_DOUBLE
5276 || GET_CODE (op) == CONST_FIXED
5277 || GET_CODE (op) == CONST_VECTOR)
5278 return simplify_immed_subreg (outermode, op, innermode, byte);
5280 /* Changing mode twice with SUBREG => just change it once,
5281 or not at all if changing back to OP's starting mode. */
5282 if (GET_CODE (op) == SUBREG)
5284 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5285 int final_offset = byte + SUBREG_BYTE (op);
5286 rtx newx;
5288 if (outermode == innermostmode
5289 && byte == 0 && SUBREG_BYTE (op) == 0)
5290 return SUBREG_REG (op);
5292 /* The SUBREG_BYTE represents the offset, as if the value were stored
5293 in memory. An irritating exception is a paradoxical subreg, where
5294 we define SUBREG_BYTE to be 0. On big-endian machines, this
5295 value should be negative. For a moment, undo this exception. */
5296 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5298 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5299 if (WORDS_BIG_ENDIAN)
5300 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5301 if (BYTES_BIG_ENDIAN)
5302 final_offset += difference % UNITS_PER_WORD;
5304 if (SUBREG_BYTE (op) == 0
5305 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5307 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5308 if (WORDS_BIG_ENDIAN)
5309 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5310 if (BYTES_BIG_ENDIAN)
5311 final_offset += difference % UNITS_PER_WORD;
5314 /* See whether resulting subreg will be paradoxical. */
5315 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5317 /* In nonparadoxical subregs we can't handle negative offsets. */
5318 if (final_offset < 0)
5319 return NULL_RTX;
5320 /* Bail out in case resulting subreg would be incorrect. */
5321 if (final_offset % GET_MODE_SIZE (outermode)
5322 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5323 return NULL_RTX;
5325 else
5327 int offset = 0;
5328 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5330 /* In a paradoxical subreg, see if we are still looking at the lower part.
5331 If so, our SUBREG_BYTE will be 0. */
5332 if (WORDS_BIG_ENDIAN)
5333 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5334 if (BYTES_BIG_ENDIAN)
5335 offset += difference % UNITS_PER_WORD;
5336 if (offset == final_offset)
5337 final_offset = 0;
5338 else
5339 return NULL_RTX;
5342 /* Recurse for further possible simplifications. */
5343 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5344 final_offset);
5345 if (newx)
5346 return newx;
5347 if (validate_subreg (outermode, innermostmode,
5348 SUBREG_REG (op), final_offset))
5350 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5351 if (SUBREG_PROMOTED_VAR_P (op)
5352 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5353 && GET_MODE_CLASS (outermode) == MODE_INT
5354 && IN_RANGE (GET_MODE_SIZE (outermode),
5355 GET_MODE_SIZE (innermode),
5356 GET_MODE_SIZE (innermostmode))
5357 && subreg_lowpart_p (newx))
5359 SUBREG_PROMOTED_VAR_P (newx) = 1;
5360 SUBREG_PROMOTED_UNSIGNED_SET
5361 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5363 return newx;
5365 return NULL_RTX;
5368 /* Merge implicit and explicit truncations. */
5370 if (GET_CODE (op) == TRUNCATE
5371 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5372 && subreg_lowpart_offset (outermode, innermode) == byte)
5373 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5374 GET_MODE (XEXP (op, 0)));
5376 /* SUBREG of a hard register => just change the register number
5377 and/or mode. If the hard register is not valid in that mode,
5378 suppress this simplification. If the hard register is the stack,
5379 frame, or argument pointer, leave this as a SUBREG. */
5381 if (REG_P (op) && HARD_REGISTER_P (op))
5383 unsigned int regno, final_regno;
5385 regno = REGNO (op);
5386 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5387 if (HARD_REGISTER_NUM_P (final_regno))
5389 rtx x;
5390 int final_offset = byte;
5392 /* Adjust offset for paradoxical subregs. */
5393 if (byte == 0
5394 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5396 int difference = (GET_MODE_SIZE (innermode)
5397 - GET_MODE_SIZE (outermode));
5398 if (WORDS_BIG_ENDIAN)
5399 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5400 if (BYTES_BIG_ENDIAN)
5401 final_offset += difference % UNITS_PER_WORD;
5404 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5406 /* Propagate the original regno. We don't have any way to specify
5407 the offset inside the original regno, so do so only for the lowpart.
5408 The information is used only by alias analysis, which cannot
5409 grok partial registers anyway. */
5411 if (subreg_lowpart_offset (outermode, innermode) == byte)
5412 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5413 return x;
5417 /* If we have a SUBREG of a register that we are replacing and we are
5418 replacing it with a MEM, make a new MEM and try replacing the
5419 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5420 or if we would be widening it. */
5422 if (MEM_P (op)
5423 && ! mode_dependent_address_p (XEXP (op, 0))
5424 /* Allow splitting of volatile memory references in case we don't
5425 have an instruction to move the whole thing. */
5426 && (! MEM_VOLATILE_P (op)
5427 || ! have_insn_for (SET, innermode))
5428 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5429 return adjust_address_nv (op, outermode, byte);
5431 /* Handle complex values represented as CONCAT
5432 of real and imaginary part. */
5433 if (GET_CODE (op) == CONCAT)
5435 unsigned int part_size, final_offset;
5436 rtx part, res;
5438 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5439 if (byte < part_size)
5441 part = XEXP (op, 0);
5442 final_offset = byte;
5444 else
5446 part = XEXP (op, 1);
5447 final_offset = byte - part_size;
5450 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5451 return NULL_RTX;
5453 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5454 if (res)
5455 return res;
5456 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5457 return gen_rtx_SUBREG (outermode, part, final_offset);
5458 return NULL_RTX;
5461 /* Optimize SUBREG truncations of zero and sign extended values. */
5462 if ((GET_CODE (op) == ZERO_EXTEND
5463 || GET_CODE (op) == SIGN_EXTEND)
5464 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5466 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5468 /* If we're requesting the lowpart of a zero or sign extension,
5469 there are three possibilities. If the outermode is the same
5470 as the origmode, we can omit both the extension and the subreg.
5471 If the outermode is not larger than the origmode, we can apply
5472 the truncation without the extension. Finally, if the outermode
5473 is larger than the origmode, but both are integer modes, we
5474 can just extend to the appropriate mode. */
5475 if (bitpos == 0)
5477 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5478 if (outermode == origmode)
5479 return XEXP (op, 0);
5480 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5481 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5482 subreg_lowpart_offset (outermode,
5483 origmode));
5484 if (SCALAR_INT_MODE_P (outermode))
5485 return simplify_gen_unary (GET_CODE (op), outermode,
5486 XEXP (op, 0), origmode);
5489 /* A SUBREG resulting from a zero extension may fold to zero if
5490 it extracts higher bits than the ZERO_EXTEND's source bits. */
5491 if (GET_CODE (op) == ZERO_EXTEND
5492 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5493 return CONST0_RTX (outermode);
5496 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5497 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5498 the outer subreg is effectively a truncation to the original mode. */
5499 if ((GET_CODE (op) == LSHIFTRT
5500 || GET_CODE (op) == ASHIFTRT)
5501 && SCALAR_INT_MODE_P (outermode)
5502 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5503 to avoid the possibility that an outer LSHIFTRT shifts by more
5504 than the sign extension's sign_bit_copies and introduces zeros
5505 into the high bits of the result. */
5506 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5507 && CONST_INT_P (XEXP (op, 1))
5508 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5509 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5510 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5511 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5512 return simplify_gen_binary (ASHIFTRT, outermode,
5513 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5515 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5516 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5517 the outer subreg is effectively a truncation to the original mode. */
5518 if ((GET_CODE (op) == LSHIFTRT
5519 || GET_CODE (op) == ASHIFTRT)
5520 && SCALAR_INT_MODE_P (outermode)
5521 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5522 && CONST_INT_P (XEXP (op, 1))
5523 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5524 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5525 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5526 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5527 return simplify_gen_binary (LSHIFTRT, outermode,
5528 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5530 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5531 (ashift:QI (x:QI) C), where C is a suitable small constant and
5532 the outer subreg is effectively a truncation to the original mode. */
5533 if (GET_CODE (op) == ASHIFT
5534 && SCALAR_INT_MODE_P (outermode)
5535 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5536 && CONST_INT_P (XEXP (op, 1))
5537 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5538 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5539 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5540 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5541 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5542 return simplify_gen_binary (ASHIFT, outermode,
5543 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5545 /* Recognize a word extraction from a multi-word subreg. */
5546 if ((GET_CODE (op) == LSHIFTRT
5547 || GET_CODE (op) == ASHIFTRT)
5548 && SCALAR_INT_MODE_P (outermode)
5549 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5550 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5551 && CONST_INT_P (XEXP (op, 1))
5552 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5553 && INTVAL (XEXP (op, 1)) >= 0
5554 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5555 && byte == subreg_lowpart_offset (outermode, innermode))
5557 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5558 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5559 (WORDS_BIG_ENDIAN
5560 ? byte - shifted_bytes
5561 : byte + shifted_bytes));
5564 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5565 and try replacing the SUBREG and shift with it. Don't do this if
5566 the MEM has a mode-dependent address or if we would be widening it. */
5568 if ((GET_CODE (op) == LSHIFTRT
5569 || GET_CODE (op) == ASHIFTRT)
5570 && MEM_P (XEXP (op, 0))
5571 && CONST_INT_P (XEXP (op, 1))
5572 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5573 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5574 && INTVAL (XEXP (op, 1)) > 0
5575 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5576 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5577 && ! MEM_VOLATILE_P (XEXP (op, 0))
5578 && byte == subreg_lowpart_offset (outermode, innermode)
5579 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5580 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5582 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5583 return adjust_address_nv (XEXP (op, 0), outermode,
5584 (WORDS_BIG_ENDIAN
5585 ? byte - shifted_bytes
5586 : byte + shifted_bytes));
5589 return NULL_RTX;
5592 /* Make a SUBREG operation or equivalent if it folds. */
5595 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5596 enum machine_mode innermode, unsigned int byte)
5598 rtx newx;
5600 newx = simplify_subreg (outermode, op, innermode, byte);
5601 if (newx)
5602 return newx;
5604 if (GET_CODE (op) == SUBREG
5605 || GET_CODE (op) == CONCAT
5606 || GET_MODE (op) == VOIDmode)
5607 return NULL_RTX;
5609 if (validate_subreg (outermode, innermode, op, byte))
5610 return gen_rtx_SUBREG (outermode, op, byte);
5612 return NULL_RTX;
5615 /* Simplify X, an rtx expression.
5617 Return the simplified expression or NULL if no simplifications
5618 were possible.
5620 This is the preferred entry point into the simplification routines;
5621 however, we still allow passes to call the more specific routines.
5623 Right now GCC has three (yes, three) major bodies of RTL simplification
5624 code that need to be unified.
5626 1. fold_rtx in cse.c. This code uses various CSE specific
5627 information to aid in RTL simplification.
5629 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5630 it uses combine specific information to aid in RTL
5631 simplification.
5633 3. The routines in this file.
5636 Long term we want to only have one body of simplification code; to
5637 get to that state I recommend the following steps:
5639 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5640 which do not depend on pass-specific state into these routines.
5642 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5643 use this routine whenever possible.
5645 3. Allow for pass dependent state to be provided to these
5646 routines and add simplifications based on the pass dependent
5647 state. Remove code from cse.c & combine.c that becomes
5648 redundant/dead.
5650 It will take time, but ultimately the compiler will be easier to
5651 maintain and improve. It's totally silly that when we add a
5652 simplification it needs to be added to 4 places (3 for RTL
5653 simplification and 1 for tree simplification). */
5656 simplify_rtx (const_rtx x)
5658 const enum rtx_code code = GET_CODE (x);
5659 const enum machine_mode mode = GET_MODE (x);
5661 switch (GET_RTX_CLASS (code))
5663 case RTX_UNARY:
5664 return simplify_unary_operation (code, mode,
5665 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5666 case RTX_COMM_ARITH:
5667 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5668 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5670 /* Fall through.... */
5672 case RTX_BIN_ARITH:
5673 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5675 case RTX_TERNARY:
5676 case RTX_BITFIELD_OPS:
5677 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5678 XEXP (x, 0), XEXP (x, 1),
5679 XEXP (x, 2));
5681 case RTX_COMPARE:
5682 case RTX_COMM_COMPARE:
5683 return simplify_relational_operation (code, mode,
5684 ((GET_MODE (XEXP (x, 0))
5685 != VOIDmode)
5686 ? GET_MODE (XEXP (x, 0))
5687 : GET_MODE (XEXP (x, 1))),
5688 XEXP (x, 0),
5689 XEXP (x, 1));
5691 case RTX_EXTRA:
5692 if (code == SUBREG)
5693 return simplify_subreg (mode, SUBREG_REG (x),
5694 GET_MODE (SUBREG_REG (x)),
5695 SUBREG_BYTE (x));
5696 break;
5698 case RTX_OBJ:
5699 if (code == LO_SUM)
5701 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5702 if (GET_CODE (XEXP (x, 0)) == HIGH
5703 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5704 return XEXP (x, 1);
5706 break;
5708 default:
5709 break;
5711 return NULL;