[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
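/* Illustrative sketch (not from the original sources), assuming a 64-bit
   HOST_WIDE_INT, of how the macro widens a value into a (low, high) pair:

     HOST_WIDE_INT lo = INTVAL (op);            for example -8
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);   -1, replicating lo's sign bit
                                                (0 if lo had been non-negative)  */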
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
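/* Illustrative sketch: for SImode (precision 32) the only value that
   satisfies mode_signbit_p is the lone sign bit 0x80000000; constants such
   as 0xc0000000 or 0x40000000 return false, as do modes too wide to be
   represented, per the FIXME above.  */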
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 unsigned int width;
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 unsigned int width;
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
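/* Illustrative sketch of the three predicates above, using SImode
   (precision 32) as the example mode:
     val_signbit_p             (SImode, 0x80000000)  -> true (value IS the sign bit)
     val_signbit_known_set_p   (SImode, 0xffffffff)  -> true (bit 31 is set)
     val_signbit_known_clear_p (SImode, 0x7fffffff)  -> true (bit 31 is clear)
   All three return false when MODE is wider than HOST_BITS_PER_WIDE_INT.  */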
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
170 rtx tem;
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
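/* Illustrative sketch: simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   first lets simplify_binary_operation try to fold; failing that, the
   commutative swap above yields the canonical (plus:SI reg (const_int 1))
   rather than (plus:SI (const_int 1) reg).  */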
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
194 switch (GET_CODE (x))
196 case MEM:
197 break;
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
205 REAL_VALUE_TYPE d;
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 return x;
212 default:
213 return x;
216 if (GET_MODE (x) == BLKmode)
217 return x;
219 addr = XEXP (x, 0);
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
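/* Illustrative sketch: if X is a MEM whose address is a constant-pool
   SYMBOL_REF holding, say, the SFmode constant 1.0, the function above
   returns that CONST_DOUBLE (possibly adjusted by simplify_subreg when the
   access mode or offset differs); any other MEM is returned unchanged.  */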
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
510 break;
512 default:
513 break;
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
530 if (newvec == vec)
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
537 RTVEC_ELT (newvec, j) = op;
540 break;
542 case 'e':
543 if (XEXP (x, i))
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
553 break;
555 return newx;
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568 Only handle cases where the truncated value is inherently an rvalue.
570 RTL provides two ways of truncating a value:
572 1. a lowpart subreg. This form is only a truncation when both
573 the outer and inner modes (here MODE and OP_MODE respectively)
574 are scalar integers, and only then when the subreg is used as
575 an rvalue.
577 It is only valid to form such truncating subregs if the
578 truncation requires no action by the target. The onus for
579 proving this is on the creator of the subreg -- e.g. the
580 caller to simplify_subreg or simplify_gen_subreg -- and typically
581 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
583 2. a TRUNCATE. This form handles both scalar and compound integers.
585 The first form is preferred where valid. However, the TRUNCATE
586 handling in simplify_unary_operation turns the second form into the
587 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588 so it is generally safe to form rvalue truncations using:
590 simplify_gen_unary (TRUNCATE, ...)
592 and leave simplify_unary_operation to work out which representation
593 should be used.
595 Because of the proof requirements on (1), simplify_truncation must
596 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597 regardless of whether the outer truncation came from a SUBREG or a
598 TRUNCATE. For example, if the caller has proven that an SImode
599 truncation of:
601 (and:DI X Y)
603 is a no-op and can be represented as a subreg, it does not follow
604 that SImode truncations of X and Y are also no-ops. On a target
605 like 64-bit MIPS that requires SImode values to be stored in
606 sign-extended form, an SImode truncation of:
608 (and:DI (reg:DI X) (const_int 63))
610 is trivially a no-op because only the lower 6 bits can be set.
611 However, X is still an arbitrary 64-bit number and so we cannot
612 assume that truncating it too is a no-op. */
614 static rtx
615 simplify_truncation (enum machine_mode mode, rtx op,
616 enum machine_mode op_mode)
618 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
619 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
620 gcc_assert (precision <= op_precision);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
631 mode. */
632 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
643 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
644 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
645 if (GET_CODE (op) == PLUS
646 || GET_CODE (op) == MINUS
647 || GET_CODE (op) == MULT)
649 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
650 if (op0)
652 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
653 if (op1)
654 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
658 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
659 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
660 the outer subreg is effectively a truncation to the original mode. */
661 if ((GET_CODE (op) == LSHIFTRT
662 || GET_CODE (op) == ASHIFTRT)
663 /* Ensure that OP_MODE is at least twice as wide as MODE
664 to avoid the possibility that an outer LSHIFTRT shifts by more
665 than the sign extension's sign_bit_copies and introduces zeros
666 into the high bits of the result. */
667 && 2 * precision <= op_precision
668 && CONST_INT_P (XEXP (op, 1))
669 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
670 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
671 && UINTVAL (XEXP (op, 1)) < precision)
672 return simplify_gen_binary (ASHIFTRT, mode,
673 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
675 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
676 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op) == LSHIFTRT
679 || GET_CODE (op) == ASHIFTRT)
680 && CONST_INT_P (XEXP (op, 1))
681 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
682 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
683 && UINTVAL (XEXP (op, 1)) < precision)
684 return simplify_gen_binary (LSHIFTRT, mode,
685 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
687 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
688 to (ashift:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if (GET_CODE (op) == ASHIFT
691 && CONST_INT_P (XEXP (op, 1))
692 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
693 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
694 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
695 && UINTVAL (XEXP (op, 1)) < precision)
696 return simplify_gen_binary (ASHIFT, mode,
697 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
699 /* Recognize a word extraction from a multi-word subreg. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && SCALAR_INT_MODE_P (mode)
703 && SCALAR_INT_MODE_P (op_mode)
704 && precision >= BITS_PER_WORD
705 && 2 * precision <= op_precision
706 && CONST_INT_P (XEXP (op, 1))
707 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
708 && UINTVAL (XEXP (op, 1)) < op_precision)
710 int byte = subreg_lowpart_offset (mode, op_mode);
711 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
712 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
713 (WORDS_BIG_ENDIAN
714 ? byte - shifted_bytes
715 : byte + shifted_bytes));
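/* Illustrative sketch of the word-extraction case above, assuming a
   little-endian target with 32-bit words and 64-bit DImode:
     (truncate:SI (lshiftrt:DI (reg:DI x) (const_int 32)))
   becomes (subreg:SI (reg:DI x) 4), i.e. a direct reference to the high
   word of X.  */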
718 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
719 and try replacing the TRUNCATE and shift with it. Don't do this
720 if the MEM has a mode-dependent address. */
721 if ((GET_CODE (op) == LSHIFTRT
722 || GET_CODE (op) == ASHIFTRT)
723 && SCALAR_INT_MODE_P (op_mode)
724 && MEM_P (XEXP (op, 0))
725 && CONST_INT_P (XEXP (op, 1))
726 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
727 && INTVAL (XEXP (op, 1)) > 0
728 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
729 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
730 MEM_ADDR_SPACE (XEXP (op, 0)))
731 && ! MEM_VOLATILE_P (XEXP (op, 0))
732 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
733 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
735 int byte = subreg_lowpart_offset (mode, op_mode);
736 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
737 return adjust_address_nv (XEXP (op, 0), mode,
738 (WORDS_BIG_ENDIAN
739 ? byte - shifted_bytes
740 : byte + shifted_bytes));
743 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
744 (OP:SI foo:SI) if OP is NEG or ABS. */
745 if ((GET_CODE (op) == ABS
746 || GET_CODE (op) == NEG)
747 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
748 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
749 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
750 return simplify_gen_unary (GET_CODE (op), mode,
751 XEXP (XEXP (op, 0), 0), mode);
753 /* (truncate:A (subreg:B (truncate:C X) 0)) is
754 (truncate:A X). */
755 if (GET_CODE (op) == SUBREG
756 && SCALAR_INT_MODE_P (mode)
757 && SCALAR_INT_MODE_P (op_mode)
758 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
759 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
760 && subreg_lowpart_p (op))
761 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
762 GET_MODE (XEXP (SUBREG_REG (op), 0)));
764 /* (truncate:A (truncate:B X)) is (truncate:A X). */
765 if (GET_CODE (op) == TRUNCATE)
766 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
767 GET_MODE (XEXP (op, 0)));
769 return NULL_RTX;
772 /* Try to simplify a unary operation CODE whose output mode is to be
773 MODE with input operand OP whose mode was originally OP_MODE.
774 Return zero if no simplification can be made. */
775 rtx
776 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
777 rtx op, enum machine_mode op_mode)
779 rtx trueop, tem;
781 trueop = avoid_constant_pool_reference (op);
783 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
784 if (tem)
785 return tem;
787 return simplify_unary_operation_1 (code, mode, op);
790 /* Perform some simplifications we can do even if the operands
791 aren't constant. */
792 static rtx
793 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
795 enum rtx_code reversed;
796 rtx temp;
798 switch (code)
800 case NOT:
801 /* (not (not X)) == X. */
802 if (GET_CODE (op) == NOT)
803 return XEXP (op, 0);
805 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
806 comparison is all ones. */
807 if (COMPARISON_P (op)
808 && (mode == BImode || STORE_FLAG_VALUE == -1)
809 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
810 return simplify_gen_relational (reversed, mode, VOIDmode,
811 XEXP (op, 0), XEXP (op, 1));
813 /* (not (plus X -1)) can become (neg X). */
814 if (GET_CODE (op) == PLUS
815 && XEXP (op, 1) == constm1_rtx)
816 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
818 /* Similarly, (not (neg X)) is (plus X -1). */
819 if (GET_CODE (op) == NEG)
820 return plus_constant (mode, XEXP (op, 0), -1);
822 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
823 if (GET_CODE (op) == XOR
824 && CONST_INT_P (XEXP (op, 1))
825 && (temp = simplify_unary_operation (NOT, mode,
826 XEXP (op, 1), mode)) != 0)
827 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
829 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
830 if (GET_CODE (op) == PLUS
831 && CONST_INT_P (XEXP (op, 1))
832 && mode_signbit_p (mode, XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
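/* Illustrative sketch: in SImode, where the sign-bit constant is
   0x80000000, (not:SI (plus:SI X (const_int 0x80000000))) becomes
   (xor:SI X (const_int 0x7fffffff)); adding the sign bit only flips bit 31,
   so the NOT can be folded into the complemented mask.  */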
838 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
839 operands other than 1, but that is not valid. We could do a
840 similar simplification for (not (lshiftrt C X)) where C is
841 just the sign bit, but this doesn't seem common enough to
842 bother with. */
843 if (GET_CODE (op) == ASHIFT
844 && XEXP (op, 0) == const1_rtx)
846 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
847 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
850 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
851 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
852 so we can perform the above simplification. */
854 if (STORE_FLAG_VALUE == -1
855 && GET_CODE (op) == ASHIFTRT
856 && CONST_INT_P (XEXP (op, 1))
857 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
858 return simplify_gen_relational (GE, mode, VOIDmode,
859 XEXP (op, 0), const0_rtx);
862 if (GET_CODE (op) == SUBREG
863 && subreg_lowpart_p (op)
864 && (GET_MODE_SIZE (GET_MODE (op))
865 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
866 && GET_CODE (SUBREG_REG (op)) == ASHIFT
867 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
869 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
870 rtx x;
872 x = gen_rtx_ROTATE (inner_mode,
873 simplify_gen_unary (NOT, inner_mode, const1_rtx,
874 inner_mode),
875 XEXP (SUBREG_REG (op), 1));
876 return rtl_hooks.gen_lowpart_no_emit (mode, x);
879 /* Apply De Morgan's laws to reduce number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
882 coded. */
884 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
886 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
887 enum machine_mode op_mode;
889 op_mode = GET_MODE (in1);
890 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
892 op_mode = GET_MODE (in2);
893 if (op_mode == VOIDmode)
894 op_mode = mode;
895 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
897 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
899 rtx tem = in2;
900 in2 = in1; in1 = tem;
903 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
904 mode, in1, in2);
906 break;
908 case NEG:
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op) == NEG)
911 return XEXP (op, 0);
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op) == PLUS
915 && XEXP (op, 1) == const1_rtx)
916 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op) == NOT)
920 return plus_constant (mode, XEXP (op, 0), 1);
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is the same as (minus X Y). If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
930 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
932 if (GET_CODE (op) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_SCALAR_INT_P (XEXP (op, 1))
938 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
940 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
941 if (temp)
942 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
947 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
955 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
956 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
961 is a constant). */
962 if (GET_CODE (op) == ASHIFT)
964 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
965 if (temp)
966 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op) == ASHIFTRT
972 && CONST_INT_P (XEXP (op, 1))
973 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
974 return simplify_gen_binary (LSHIFTRT, mode,
975 XEXP (op, 0), XEXP (op, 1));
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op) == LSHIFTRT
980 && CONST_INT_P (XEXP (op, 1))
981 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
982 return simplify_gen_binary (ASHIFTRT, mode,
983 XEXP (op, 0), XEXP (op, 1));
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op) == XOR
987 && XEXP (op, 1) == const1_rtx
988 && nonzero_bits (XEXP (op, 0), mode) == 1)
989 return plus_constant (mode, XEXP (op, 0), -1);
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
992 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
993 if (GET_CODE (op) == LT
994 && XEXP (op, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
997 enum machine_mode inner = GET_MODE (XEXP (op, 0));
998 int isize = GET_MODE_PRECISION (inner);
999 if (STORE_FLAG_VALUE == 1)
1001 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1002 GEN_INT (isize - 1));
1003 if (mode == inner)
1004 return temp;
1005 if (GET_MODE_PRECISION (mode) > isize)
1006 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1007 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1009 else if (STORE_FLAG_VALUE == -1)
1011 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
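/* Illustrative sketch of the (neg (lt X 0)) case above, for X in SImode:
   with STORE_FLAG_VALUE == 1 the result is (ashiftrt:SI X (const_int 31)),
   which replicates the sign bit and yields -1 or 0; with
   STORE_FLAG_VALUE == -1 the negated comparison is 1 or 0, so the logical
   (lshiftrt:SI X (const_int 31)) is used instead.  */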
1020 break;
1022 case TRUNCATE:
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op) == LSHIFTRT
1026 && GET_CODE (XEXP (op, 0)) == MULT)
1027 break;
1029 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1032 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1033 /* We can't handle truncation to a partial integer mode here
1034 because we don't know the real bitsize of the partial
1035 integer mode. */
1036 break;
1039 if (GET_MODE (op) != VOIDmode)
1041 temp = simplify_truncation (mode, op, GET_MODE (op));
1042 if (temp)
1043 return temp;
1046 /* If we know that the value is already truncated, we can
1047 replace the TRUNCATE with a SUBREG. */
1048 if (GET_MODE_NUNITS (mode) == 1
1049 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1050 || truncated_to_mode (mode, op)))
1051 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1053 /* A truncate of a comparison can be replaced with a subreg if
1054 STORE_FLAG_VALUE permits. This is like the previous test,
1055 but it works even if the comparison is done in a mode larger
1056 than HOST_BITS_PER_WIDE_INT. */
1057 if (HWI_COMPUTABLE_MODE_P (mode)
1058 && COMPARISON_P (op)
1059 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1060 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1062 /* A truncate of a memory is just loading the low part of the memory
1063 if we are not changing the meaning of the address. */
1064 if (GET_CODE (op) == MEM
1065 && !VECTOR_MODE_P (mode)
1066 && !MEM_VOLATILE_P (op)
1067 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1068 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1070 break;
1072 case FLOAT_TRUNCATE:
1073 if (DECIMAL_FLOAT_MODE_P (mode))
1074 break;
1076 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1077 if (GET_CODE (op) == FLOAT_EXTEND
1078 && GET_MODE (XEXP (op, 0)) == mode)
1079 return XEXP (op, 0);
1081 /* (float_truncate:SF (float_truncate:DF foo:XF))
1082 = (float_truncate:SF foo:XF).
1083 This may eliminate double rounding, so it is unsafe.
1085 (float_truncate:SF (float_extend:XF foo:DF))
1086 = (float_truncate:SF foo:DF).
1088 (float_truncate:DF (float_extend:XF foo:SF))
1089 = (float_extend:DF foo:SF). */
1090 if ((GET_CODE (op) == FLOAT_TRUNCATE
1091 && flag_unsafe_math_optimizations)
1092 || GET_CODE (op) == FLOAT_EXTEND)
1093 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1094 0)))
1095 > GET_MODE_SIZE (mode)
1096 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1097 mode,
1098 XEXP (op, 0), mode);
1100 /* (float_truncate (float x)) is (float x) */
1101 if (GET_CODE (op) == FLOAT
1102 && (flag_unsafe_math_optimizations
1103 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1104 && ((unsigned)significand_size (GET_MODE (op))
1105 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1106 - num_sign_bit_copies (XEXP (op, 0),
1107 GET_MODE (XEXP (op, 0))))))))
1108 return simplify_gen_unary (FLOAT, mode,
1109 XEXP (op, 0),
1110 GET_MODE (XEXP (op, 0)));
1112 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1113 (OP:SF foo:SF) if OP is NEG or ABS. */
1114 if ((GET_CODE (op) == ABS
1115 || GET_CODE (op) == NEG)
1116 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1117 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1118 return simplify_gen_unary (GET_CODE (op), mode,
1119 XEXP (XEXP (op, 0), 0), mode);
1121 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1122 is (float_truncate:SF x). */
1123 if (GET_CODE (op) == SUBREG
1124 && subreg_lowpart_p (op)
1125 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1126 return SUBREG_REG (op);
1127 break;
1129 case FLOAT_EXTEND:
1130 if (DECIMAL_FLOAT_MODE_P (mode))
1131 break;
1133 /* (float_extend (float_extend x)) is (float_extend x)
1135 (float_extend (float x)) is (float x) assuming that double
1136 rounding can't happen.
1137 */
1138 if (GET_CODE (op) == FLOAT_EXTEND
1139 || (GET_CODE (op) == FLOAT
1140 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1141 && ((unsigned)significand_size (GET_MODE (op))
1142 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1143 - num_sign_bit_copies (XEXP (op, 0),
1144 GET_MODE (XEXP (op, 0)))))))
1145 return simplify_gen_unary (GET_CODE (op), mode,
1146 XEXP (op, 0),
1147 GET_MODE (XEXP (op, 0)));
1149 break;
1151 case ABS:
1152 /* (abs (neg <foo>)) -> (abs <foo>) */
1153 if (GET_CODE (op) == NEG)
1154 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1155 GET_MODE (XEXP (op, 0)));
1157 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1158 do nothing. */
1159 if (GET_MODE (op) == VOIDmode)
1160 break;
1162 /* If operand is something known to be positive, ignore the ABS. */
1163 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1164 || val_signbit_known_clear_p (GET_MODE (op),
1165 nonzero_bits (op, GET_MODE (op))))
1166 return op;
1168 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1169 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1170 return gen_rtx_NEG (mode, op);
1172 break;
1174 case FFS:
1175 /* (ffs (*_extend <X>)) = (ffs <X>) */
1176 if (GET_CODE (op) == SIGN_EXTEND
1177 || GET_CODE (op) == ZERO_EXTEND)
1178 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1179 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case POPCOUNT:
1183 switch (GET_CODE (op))
1185 case BSWAP:
1186 case ZERO_EXTEND:
1187 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1188 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1189 GET_MODE (XEXP (op, 0)));
1191 case ROTATE:
1192 case ROTATERT:
1193 /* Rotations don't affect popcount. */
1194 if (!side_effects_p (XEXP (op, 1)))
1195 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1196 GET_MODE (XEXP (op, 0)));
1197 break;
1199 default:
1200 break;
1202 break;
1204 case PARITY:
1205 switch (GET_CODE (op))
1207 case NOT:
1208 case BSWAP:
1209 case ZERO_EXTEND:
1210 case SIGN_EXTEND:
1211 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1214 case ROTATE:
1215 case ROTATERT:
1216 /* Rotations don't affect parity. */
1217 if (!side_effects_p (XEXP (op, 1)))
1218 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1220 break;
1222 default:
1223 break;
1225 break;
1227 case BSWAP:
1228 /* (bswap (bswap x)) -> x. */
1229 if (GET_CODE (op) == BSWAP)
1230 return XEXP (op, 0);
1231 break;
1233 case FLOAT:
1234 /* (float (sign_extend <X>)) = (float <X>). */
1235 if (GET_CODE (op) == SIGN_EXTEND)
1236 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1238 break;
1240 case SIGN_EXTEND:
1241 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1242 becomes just the MINUS if its mode is MODE. This allows
1243 folding switch statements on machines using casesi (such as
1244 the VAX). */
1245 if (GET_CODE (op) == TRUNCATE
1246 && GET_MODE (XEXP (op, 0)) == mode
1247 && GET_CODE (XEXP (op, 0)) == MINUS
1248 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1249 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1250 return XEXP (op, 0);
1252 /* Extending a widening multiplication should be canonicalized to
1253 a wider widening multiplication. */
1254 if (GET_CODE (op) == MULT)
1256 rtx lhs = XEXP (op, 0);
1257 rtx rhs = XEXP (op, 1);
1258 enum rtx_code lcode = GET_CODE (lhs);
1259 enum rtx_code rcode = GET_CODE (rhs);
1261 /* Widening multiplies usually extend both operands, but sometimes
1262 they use a shift to extract a portion of a register. */
1263 if ((lcode == SIGN_EXTEND
1264 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1265 && (rcode == SIGN_EXTEND
1266 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1268 enum machine_mode lmode = GET_MODE (lhs);
1269 enum machine_mode rmode = GET_MODE (rhs);
1270 int bits;
1272 if (lcode == ASHIFTRT)
1273 /* Number of bits not shifted off the end. */
1274 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1275 else /* lcode == SIGN_EXTEND */
1276 /* Size of inner mode. */
1277 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1279 if (rcode == ASHIFTRT)
1280 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1281 else /* rcode == SIGN_EXTEND */
1282 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1284 /* We can only widen multiplies if the result is mathematically
1285 equivalent. I.e. if overflow was impossible. */
1286 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1287 return simplify_gen_binary
1288 (MULT, mode,
1289 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1290 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
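/* Illustrative sketch of the widening-multiply rewrite above:
     (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
                              (sign_extend:SI (reg:HI b))))
   has bits = 16 + 16 = 32 <= 32, so it becomes the wider widening multiply
   (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))), which
   is equally incapable of overflowing.  */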
1294 /* Check for a sign extension of a subreg of a promoted
1295 variable, where the promotion is sign-extended, and the
1296 target mode is the same as the variable's promotion. */
1297 if (GET_CODE (op) == SUBREG
1298 && SUBREG_PROMOTED_VAR_P (op)
1299 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1300 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1301 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1303 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1304 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1305 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1307 gcc_assert (GET_MODE_BITSIZE (mode)
1308 > GET_MODE_BITSIZE (GET_MODE (op)));
1309 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1313 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1314 is (sign_extend:M (subreg:O <X>)) if there is mode with
1315 GET_MODE_BITSIZE (N) - I bits.
1316 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1317 is similarly (zero_extend:M (subreg:O <X>)). */
1318 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1319 && GET_CODE (XEXP (op, 0)) == ASHIFT
1320 && CONST_INT_P (XEXP (op, 1))
1321 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1322 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1324 enum machine_mode tmode
1325 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1326 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1327 gcc_assert (GET_MODE_BITSIZE (mode)
1328 > GET_MODE_BITSIZE (GET_MODE (op)));
1329 if (tmode != BLKmode)
1331 rtx inner =
1332 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1333 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1334 ? SIGN_EXTEND : ZERO_EXTEND,
1335 mode, inner, tmode);
1339 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1340 /* As we do not know which address space the pointer is referring to,
1341 we can do this only if the target does not support different pointer
1342 or address modes depending on the address space. */
1343 if (target_default_pointer_address_modes_p ()
1344 && ! POINTERS_EXTEND_UNSIGNED
1345 && mode == Pmode && GET_MODE (op) == ptr_mode
1346 && (CONSTANT_P (op)
1347 || (GET_CODE (op) == SUBREG
1348 && REG_P (SUBREG_REG (op))
1349 && REG_POINTER (SUBREG_REG (op))
1350 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1351 return convert_memory_address (Pmode, op);
1352 #endif
1353 break;
1355 case ZERO_EXTEND:
1356 /* Check for a zero extension of a subreg of a promoted
1357 variable, where the promotion is zero-extended, and the
1358 target mode is the same as the variable's promotion. */
1359 if (GET_CODE (op) == SUBREG
1360 && SUBREG_PROMOTED_VAR_P (op)
1361 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1362 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1363 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1365 /* Extending a widening multiplication should be canonicalized to
1366 a wider widening multiplication. */
1367 if (GET_CODE (op) == MULT)
1369 rtx lhs = XEXP (op, 0);
1370 rtx rhs = XEXP (op, 1);
1371 enum rtx_code lcode = GET_CODE (lhs);
1372 enum rtx_code rcode = GET_CODE (rhs);
1374 /* Widening multiplies usually extend both operands, but sometimes
1375 they use a shift to extract a portion of a register. */
1376 if ((lcode == ZERO_EXTEND
1377 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1378 && (rcode == ZERO_EXTEND
1379 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1381 enum machine_mode lmode = GET_MODE (lhs);
1382 enum machine_mode rmode = GET_MODE (rhs);
1383 int bits;
1385 if (lcode == LSHIFTRT)
1386 /* Number of bits not shifted off the end. */
1387 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1388 else /* lcode == ZERO_EXTEND */
1389 /* Size of inner mode. */
1390 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1392 if (rcode == LSHIFTRT)
1393 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1394 else /* rcode == ZERO_EXTEND */
1395 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1397 /* We can only widen multiplies if the result is mathematically
1398 equivalent. I.e. if overflow was impossible. */
1399 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1400 return simplify_gen_binary
1401 (MULT, mode,
1402 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1403 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1407 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1408 if (GET_CODE (op) == ZERO_EXTEND)
1409 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1410 GET_MODE (XEXP (op, 0)));
1412 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1413 is (zero_extend:M (subreg:O <X>)) if there is mode with
1414 GET_MODE_BITSIZE (N) - I bits. */
1415 if (GET_CODE (op) == LSHIFTRT
1416 && GET_CODE (XEXP (op, 0)) == ASHIFT
1417 && CONST_INT_P (XEXP (op, 1))
1418 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1419 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1421 enum machine_mode tmode
1422 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1423 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1424 if (tmode != BLKmode)
1426 rtx inner =
1427 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1428 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1432 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1433 /* As we do not know which address space the pointer is referring to,
1434 we can do this only if the target does not support different pointer
1435 or address modes depending on the address space. */
1436 if (target_default_pointer_address_modes_p ()
1437 && POINTERS_EXTEND_UNSIGNED > 0
1438 && mode == Pmode && GET_MODE (op) == ptr_mode
1439 && (CONSTANT_P (op)
1440 || (GET_CODE (op) == SUBREG
1441 && REG_P (SUBREG_REG (op))
1442 && REG_POINTER (SUBREG_REG (op))
1443 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1444 return convert_memory_address (Pmode, op);
1445 #endif
1446 break;
1448 default:
1449 break;
1452 return 0;
1455 /* Try to compute the value of a unary operation CODE whose output mode is to
1456 be MODE with input operand OP whose mode was originally OP_MODE.
1457 Return zero if the value cannot be computed. */
1458 rtx
1459 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1460 rtx op, enum machine_mode op_mode)
1462 unsigned int width = GET_MODE_PRECISION (mode);
1463 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1465 if (code == VEC_DUPLICATE)
1467 gcc_assert (VECTOR_MODE_P (mode));
1468 if (GET_MODE (op) != VOIDmode)
1470 if (!VECTOR_MODE_P (GET_MODE (op)))
1471 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1472 else
1473 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1474 (GET_MODE (op)));
1476 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1477 || GET_CODE (op) == CONST_VECTOR)
1479 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1480 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1481 rtvec v = rtvec_alloc (n_elts);
1482 unsigned int i;
1484 if (GET_CODE (op) != CONST_VECTOR)
1485 for (i = 0; i < n_elts; i++)
1486 RTVEC_ELT (v, i) = op;
1487 else
1489 enum machine_mode inmode = GET_MODE (op);
1490 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1491 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1493 gcc_assert (in_n_elts < n_elts);
1494 gcc_assert ((n_elts % in_n_elts) == 0);
1495 for (i = 0; i < n_elts; i++)
1496 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1498 return gen_rtx_CONST_VECTOR (mode, v);
1502 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1504 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1505 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1506 enum machine_mode opmode = GET_MODE (op);
1507 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1508 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1509 rtvec v = rtvec_alloc (n_elts);
1510 unsigned int i;
1512 gcc_assert (op_n_elts == n_elts);
1513 for (i = 0; i < n_elts; i++)
1515 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1516 CONST_VECTOR_ELT (op, i),
1517 GET_MODE_INNER (opmode));
1518 if (!x)
1519 return 0;
1520 RTVEC_ELT (v, i) = x;
1522 return gen_rtx_CONST_VECTOR (mode, v);
1525 /* The order of these tests is critical so that, for example, we don't
1526 check the wrong mode (input vs. output) for a conversion operation,
1527 such as FIX. At some point, this should be simplified. */
1529 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1531 HOST_WIDE_INT hv, lv;
1532 REAL_VALUE_TYPE d;
1534 if (CONST_INT_P (op))
1535 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1536 else
1537 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1539 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1540 d = real_value_truncate (mode, d);
1541 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1543 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1545 HOST_WIDE_INT hv, lv;
1546 REAL_VALUE_TYPE d;
1548 if (CONST_INT_P (op))
1549 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1550 else
1551 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1553 if (op_mode == VOIDmode
1554 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1555 /* We should never get a negative number. */
1556 gcc_assert (hv >= 0);
1557 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1558 hv = 0, lv &= GET_MODE_MASK (op_mode);
1560 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1561 d = real_value_truncate (mode, d);
1562 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1565 if (CONST_INT_P (op)
1566 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1568 HOST_WIDE_INT arg0 = INTVAL (op);
1569 HOST_WIDE_INT val;
1571 switch (code)
1573 case NOT:
1574 val = ~ arg0;
1575 break;
1577 case NEG:
1578 val = - arg0;
1579 break;
1581 case ABS:
1582 val = (arg0 >= 0 ? arg0 : - arg0);
1583 break;
1585 case FFS:
1586 arg0 &= GET_MODE_MASK (mode);
1587 val = ffs_hwi (arg0);
1588 break;
1590 case CLZ:
1591 arg0 &= GET_MODE_MASK (mode);
1592 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1594 else
1595 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1596 break;
1598 case CLRSB:
1599 arg0 &= GET_MODE_MASK (mode);
1600 if (arg0 == 0)
1601 val = GET_MODE_PRECISION (mode) - 1;
1602 else if (arg0 >= 0)
1603 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1604 else if (arg0 < 0)
1605 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1606 break;
1608 case CTZ:
1609 arg0 &= GET_MODE_MASK (mode);
1610 if (arg0 == 0)
1612 /* Even if the value at zero is undefined, we have to come
1613 up with some replacement. Seems good enough. */
1614 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1615 val = GET_MODE_PRECISION (mode);
1617 else
1618 val = ctz_hwi (arg0);
1619 break;
1621 case POPCOUNT:
1622 arg0 &= GET_MODE_MASK (mode);
1623 val = 0;
1624 while (arg0)
1625 val++, arg0 &= arg0 - 1;
1626 break;
1628 case PARITY:
1629 arg0 &= GET_MODE_MASK (mode);
1630 val = 0;
1631 while (arg0)
1632 val++, arg0 &= arg0 - 1;
1633 val &= 1;
1634 break;
1636 case BSWAP:
1638 unsigned int s;
1640 val = 0;
1641 for (s = 0; s < width; s += 8)
1643 unsigned int d = width - s - 8;
1644 unsigned HOST_WIDE_INT byte;
1645 byte = (arg0 >> s) & 0xff;
1646 val |= byte << d;
1649 break;
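/* Illustrative sketch of the BSWAP folding above: for width == 32 and
   arg0 == 0x12345678 the loop produces 0x78563412; each byte moves from
   bit position S to the mirrored position WIDTH - S - 8.  */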
1651 case TRUNCATE:
1652 val = arg0;
1653 break;
1655 case ZERO_EXTEND:
1656 /* When zero-extending a CONST_INT, we need to know its
1657 original mode. */
1658 gcc_assert (op_mode != VOIDmode);
1659 if (op_width == HOST_BITS_PER_WIDE_INT)
1661 /* If we were really extending the mode,
1662 we would have to distinguish between zero-extension
1663 and sign-extension. */
1664 gcc_assert (width == op_width);
1665 val = arg0;
1667 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1668 val = arg0 & GET_MODE_MASK (op_mode);
1669 else
1670 return 0;
1671 break;
1673 case SIGN_EXTEND:
1674 if (op_mode == VOIDmode)
1675 op_mode = mode;
1676 op_width = GET_MODE_PRECISION (op_mode);
1677 if (op_width == HOST_BITS_PER_WIDE_INT)
1679 /* If we were really extending the mode,
1680 we would have to distinguish between zero-extension
1681 and sign-extension. */
1682 gcc_assert (width == op_width);
1683 val = arg0;
1685 else if (op_width < HOST_BITS_PER_WIDE_INT)
1687 val = arg0 & GET_MODE_MASK (op_mode);
1688 if (val_signbit_known_set_p (op_mode, val))
1689 val |= ~GET_MODE_MASK (op_mode);
1691 else
1692 return 0;
1693 break;
1695 case SQRT:
1696 case FLOAT_EXTEND:
1697 case FLOAT_TRUNCATE:
1698 case SS_TRUNCATE:
1699 case US_TRUNCATE:
1700 case SS_NEG:
1701 case US_NEG:
1702 case SS_ABS:
1703 return 0;
1705 default:
1706 gcc_unreachable ();
1709 return gen_int_mode (val, mode);
1712 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1713 for a DImode operation on a CONST_INT. */
1714 else if (width <= HOST_BITS_PER_DOUBLE_INT
1715 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1717 double_int first, value;
1719 if (CONST_DOUBLE_AS_INT_P (op))
1720 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1721 CONST_DOUBLE_LOW (op));
1722 else
1723 first = double_int::from_shwi (INTVAL (op));
1725 switch (code)
1727 case NOT:
1728 value = ~first;
1729 break;
1731 case NEG:
1732 value = -first;
1733 break;
1735 case ABS:
1736 if (first.is_negative ())
1737 value = -first;
1738 else
1739 value = first;
1740 break;
1742 case FFS:
1743 value.high = 0;
1744 if (first.low != 0)
1745 value.low = ffs_hwi (first.low);
1746 else if (first.high != 0)
1747 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1748 else
1749 value.low = 0;
1750 break;
1752 case CLZ:
1753 value.high = 0;
1754 if (first.high != 0)
1755 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1756 - HOST_BITS_PER_WIDE_INT;
1757 else if (first.low != 0)
1758 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1759 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1760 value.low = GET_MODE_PRECISION (mode);
1761 break;
1763 case CTZ:
1764 value.high = 0;
1765 if (first.low != 0)
1766 value.low = ctz_hwi (first.low);
1767 else if (first.high != 0)
1768 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1769 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1770 value.low = GET_MODE_PRECISION (mode);
1771 break;
1773 case POPCOUNT:
1774 value = double_int_zero;
1775 while (first.low)
1777 value.low++;
1778 first.low &= first.low - 1;
1780 while (first.high)
1782 value.low++;
1783 first.high &= first.high - 1;
1785 break;
1787 case PARITY:
1788 value = double_int_zero;
1789 while (first.low)
1791 value.low++;
1792 first.low &= first.low - 1;
1794 while (first.high)
1796 value.low++;
1797 first.high &= first.high - 1;
1799 value.low &= 1;
1800 break;
1802 case BSWAP:
1804 unsigned int s;
1806 value = double_int_zero;
1807 for (s = 0; s < width; s += 8)
1809 unsigned int d = width - s - 8;
1810 unsigned HOST_WIDE_INT byte;
1812 if (s < HOST_BITS_PER_WIDE_INT)
1813 byte = (first.low >> s) & 0xff;
1814 else
1815 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1817 if (d < HOST_BITS_PER_WIDE_INT)
1818 value.low |= byte << d;
1819 else
1820 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1823 break;
1825 case TRUNCATE:
1826 /* This is just a change-of-mode, so do nothing. */
1827 value = first;
1828 break;
1830 case ZERO_EXTEND:
1831 gcc_assert (op_mode != VOIDmode);
1833 if (op_width > HOST_BITS_PER_WIDE_INT)
1834 return 0;
1836 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1837 break;
1839 case SIGN_EXTEND:
1840 if (op_mode == VOIDmode
1841 || op_width > HOST_BITS_PER_WIDE_INT)
1842 return 0;
1843 else
1845 value.low = first.low & GET_MODE_MASK (op_mode);
1846 if (val_signbit_known_set_p (op_mode, value.low))
1847 value.low |= ~GET_MODE_MASK (op_mode);
1849 value.high = HWI_SIGN_EXTEND (value.low);
1851 break;
1853 case SQRT:
1854 return 0;
1856 default:
1857 return 0;
1860 return immed_double_int_const (value, mode);
1863 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1864 && SCALAR_FLOAT_MODE_P (mode)
1865 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1867 REAL_VALUE_TYPE d, t;
1868 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1870 switch (code)
1872 case SQRT:
1873 if (HONOR_SNANS (mode) && real_isnan (&d))
1874 return 0;
1875 real_sqrt (&t, mode, &d);
1876 d = t;
1877 break;
1878 case ABS:
1879 d = real_value_abs (&d);
1880 break;
1881 case NEG:
1882 d = real_value_negate (&d);
1883 break;
1884 case FLOAT_TRUNCATE:
1885 d = real_value_truncate (mode, d);
1886 break;
1887 case FLOAT_EXTEND:
1888 /* All this does is change the mode, unless changing
1889 mode class. */
1890 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1891 real_convert (&d, mode, &d);
1892 break;
1893 case FIX:
1894 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1895 break;
1896 case NOT:
1898 long tmp[4];
1899 int i;
1901 real_to_target (tmp, &d, GET_MODE (op));
1902 for (i = 0; i < 4; i++)
1903 tmp[i] = ~tmp[i];
1904 real_from_target (&d, tmp, mode);
1905 break;
1907 default:
1908 gcc_unreachable ();
1910 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1913 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1914 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1915 && GET_MODE_CLASS (mode) == MODE_INT
1916 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1918 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1919 operators are intentionally left unspecified (to ease implementation
1920 by target backends), for consistency, this routine implements the
1921 same semantics for constant folding as used by the middle-end. */
1923 /* This was formerly used only for non-IEEE float.
1924 eggert@twinsun.com says it is safe for IEEE also. */
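/* For example, folding (fix:SI (const_double 3.0e10)) under these rules
   clamps to the largest SImode value 0x7fffffff instead of wrapping,
   and a NaN input folds to zero.  */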
1925 HOST_WIDE_INT xh, xl, th, tl;
1926 REAL_VALUE_TYPE x, t;
1927 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1928 switch (code)
1930 case FIX:
1931 if (REAL_VALUE_ISNAN (x))
1932 return const0_rtx;
1934 /* Test against the signed upper bound. */
1935 if (width > HOST_BITS_PER_WIDE_INT)
1937 th = ((unsigned HOST_WIDE_INT) 1
1938 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1939 tl = -1;
1941 else
1943 th = 0;
1944 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1946 real_from_integer (&t, VOIDmode, tl, th, 0);
1947 if (REAL_VALUES_LESS (t, x))
1949 xh = th;
1950 xl = tl;
1951 break;
1954 /* Test against the signed lower bound. */
1955 if (width > HOST_BITS_PER_WIDE_INT)
1957 th = (unsigned HOST_WIDE_INT) (-1)
1958 << (width - HOST_BITS_PER_WIDE_INT - 1);
1959 tl = 0;
1961 else
1963 th = -1;
1964 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1966 real_from_integer (&t, VOIDmode, tl, th, 0);
1967 if (REAL_VALUES_LESS (x, t))
1969 xh = th;
1970 xl = tl;
1971 break;
1973 REAL_VALUE_TO_INT (&xl, &xh, x);
1974 break;
1976 case UNSIGNED_FIX:
1977 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1978 return const0_rtx;
1980 /* Test against the unsigned upper bound. */
1981 if (width == HOST_BITS_PER_DOUBLE_INT)
1983 th = -1;
1984 tl = -1;
1986 else if (width >= HOST_BITS_PER_WIDE_INT)
1988 th = ((unsigned HOST_WIDE_INT) 1
1989 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1990 tl = -1;
1992 else
1994 th = 0;
1995 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1997 real_from_integer (&t, VOIDmode, tl, th, 1);
1998 if (REAL_VALUES_LESS (t, x))
2000 xh = th;
2001 xl = tl;
2002 break;
2005 REAL_VALUE_TO_INT (&xl, &xh, x);
2006 break;
2008 default:
2009 gcc_unreachable ();
2011 return immed_double_const (xl, xh, mode);
2014 return NULL_RTX;
2017 /* Subroutine of simplify_binary_operation to simplify a commutative,
2018 associative binary operation CODE with result mode MODE, operating
2019 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2020 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2021 canonicalization is possible. */
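/* For example, (plus (plus x (const_int 1)) y) is reassociated below to
   (plus (plus x y) (const_int 1)), moving the constant outermost so that
   later simplifications can combine it with other constants.  */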
2023 static rtx
2024 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2025 rtx op0, rtx op1)
2027 rtx tem;
2029 /* Linearize the operator to the left. */
2030 if (GET_CODE (op1) == code)
2032 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2033 if (GET_CODE (op0) == code)
2035 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2036 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2039 /* "a op (b op c)" becomes "(b op c) op a". */
2040 if (! swap_commutative_operands_p (op1, op0))
2041 return simplify_gen_binary (code, mode, op1, op0);
2043 tem = op0;
2044 op0 = op1;
2045 op1 = tem;
2048 if (GET_CODE (op0) == code)
2050 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2051 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2053 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2054 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2057 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2058 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2059 if (tem != 0)
2060 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2062 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2063 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2064 if (tem != 0)
2065 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2068 return 0;
2072 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2073 and OP1. Return 0 if no simplification is possible.
2075 Don't use this for relational operations such as EQ or LT.
2076 Use simplify_relational_operation instead. */
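/* Note that for commutative codes the constant operand is canonicalized
   into the second position, so e.g. simplify_binary_operation (PLUS, SImode,
   const1_rtx, x) is handled exactly like x + 1.  */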
2077 rtx
2078 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2079 rtx op0, rtx op1)
2081 rtx trueop0, trueop1;
2082 rtx tem;
2084 /* Relational operations don't work here. We must know the mode
2085 of the operands in order to do the comparison correctly.
2086 Assuming a full word can give incorrect results.
2087 Consider comparing 128 with -128 in QImode. */
2088 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2089 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2091 /* Make sure the constant is second. */
2092 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2093 && swap_commutative_operands_p (op0, op1))
2095 tem = op0, op0 = op1, op1 = tem;
2098 trueop0 = avoid_constant_pool_reference (op0);
2099 trueop1 = avoid_constant_pool_reference (op1);
2101 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2102 if (tem)
2103 return tem;
2104 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2107 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2108 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2109 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2110 actual constants. */
2112 static rtx
2113 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2114 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2116 rtx tem, reversed, opleft, opright;
2117 HOST_WIDE_INT val;
2118 unsigned int width = GET_MODE_PRECISION (mode);
2120 /* Even if we can't compute a constant result,
2121 there are some cases worth simplifying. */
2123 switch (code)
2125 case PLUS:
2126 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2127 when x is NaN, infinite, or finite and nonzero. They aren't
2128 when x is -0 and the rounding mode is not towards -infinity,
2129 since (-0) + 0 is then 0. */
2130 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2131 return op0;
2133 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2134 transformations are safe even for IEEE. */
2135 if (GET_CODE (op0) == NEG)
2136 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2137 else if (GET_CODE (op1) == NEG)
2138 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2140 /* (~a) + 1 -> -a */
2141 if (INTEGRAL_MODE_P (mode)
2142 && GET_CODE (op0) == NOT
2143 && trueop1 == const1_rtx)
2144 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2146 /* Handle both-operands-constant cases. We can only add
2147 CONST_INTs to constants since the sum of relocatable symbols
2148 can't be handled by most assemblers. Don't add CONST_INT
2149 to CONST_INT since overflow won't be computed properly if wider
2150 than HOST_BITS_PER_WIDE_INT. */
2152 if ((GET_CODE (op0) == CONST
2153 || GET_CODE (op0) == SYMBOL_REF
2154 || GET_CODE (op0) == LABEL_REF)
2155 && CONST_INT_P (op1))
2156 return plus_constant (mode, op0, INTVAL (op1));
2157 else if ((GET_CODE (op1) == CONST
2158 || GET_CODE (op1) == SYMBOL_REF
2159 || GET_CODE (op1) == LABEL_REF)
2160 && CONST_INT_P (op0))
2161 return plus_constant (mode, op1, INTVAL (op0));
2163 /* See if this is something like X * C - X or vice versa or
2164 if the multiplication is written as a shift. If so, we can
2165 distribute and make a new multiply, shift, or maybe just
2166 have X (if C is 2 in the example above). But don't make
2167 something more expensive than we had before. */
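/* For instance, (plus (mult x (const_int 3)) x) becomes (mult x (const_int 4)),
   and (plus (ashift x (const_int 2)) x) becomes (mult x (const_int 5)),
   but only when the rtx cost of the result does not exceed the original.  */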
2169 if (SCALAR_INT_MODE_P (mode))
2171 double_int coeff0, coeff1;
2172 rtx lhs = op0, rhs = op1;
2174 coeff0 = double_int_one;
2175 coeff1 = double_int_one;
2177 if (GET_CODE (lhs) == NEG)
2179 coeff0 = double_int_minus_one;
2180 lhs = XEXP (lhs, 0);
2182 else if (GET_CODE (lhs) == MULT
2183 && CONST_INT_P (XEXP (lhs, 1)))
2185 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2186 lhs = XEXP (lhs, 0);
2188 else if (GET_CODE (lhs) == ASHIFT
2189 && CONST_INT_P (XEXP (lhs, 1))
2190 && INTVAL (XEXP (lhs, 1)) >= 0
2191 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2193 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2194 lhs = XEXP (lhs, 0);
2197 if (GET_CODE (rhs) == NEG)
2199 coeff1 = double_int_minus_one;
2200 rhs = XEXP (rhs, 0);
2202 else if (GET_CODE (rhs) == MULT
2203 && CONST_INT_P (XEXP (rhs, 1)))
2205 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2206 rhs = XEXP (rhs, 0);
2208 else if (GET_CODE (rhs) == ASHIFT
2209 && CONST_INT_P (XEXP (rhs, 1))
2210 && INTVAL (XEXP (rhs, 1)) >= 0
2211 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2213 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2214 rhs = XEXP (rhs, 0);
2217 if (rtx_equal_p (lhs, rhs))
2219 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2220 rtx coeff;
2221 double_int val;
2222 bool speed = optimize_function_for_speed_p (cfun);
2224 val = coeff0 + coeff1;
2225 coeff = immed_double_int_const (val, mode);
2227 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2228 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2229 ? tem : 0;
2233 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
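/* E.g. in QImode, where the sign bit is 0x80, (plus (xor x (const_int 0x7f))
   (const_int 0x80)) becomes (xor x (const_int 0xff)): adding the sign bit
   merely flips the top bit, which the combined XOR mask now does.  */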
2234 if (CONST_SCALAR_INT_P (op1)
2235 && GET_CODE (op0) == XOR
2236 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2237 && mode_signbit_p (mode, op1))
2238 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2239 simplify_gen_binary (XOR, mode, op1,
2240 XEXP (op0, 1)));
2242 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2243 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2244 && GET_CODE (op0) == MULT
2245 && GET_CODE (XEXP (op0, 0)) == NEG)
2247 rtx in1, in2;
2249 in1 = XEXP (XEXP (op0, 0), 0);
2250 in2 = XEXP (op0, 1);
2251 return simplify_gen_binary (MINUS, mode, op1,
2252 simplify_gen_binary (MULT, mode,
2253 in1, in2));
2256 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2257 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2258 is 1. */
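/* With STORE_FLAG_VALUE == 1, for example, (plus (eq a b) (const_int -1))
   becomes (neg (ne a b)): both evaluate to 0 when a == b and to -1
   otherwise.  */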
2259 if (COMPARISON_P (op0)
2260 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2261 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2262 && (reversed = reversed_comparison (op0, mode)))
2263 return
2264 simplify_gen_unary (NEG, mode, reversed, mode);
2266 /* If one of the operands is a PLUS or a MINUS, see if we can
2267 simplify this by the associative law.
2268 Don't use the associative law for floating point.
2269 The inaccuracy makes it nonassociative,
2270 and subtle programs can break if operations are associated. */
2272 if (INTEGRAL_MODE_P (mode)
2273 && (plus_minus_operand_p (op0)
2274 || plus_minus_operand_p (op1))
2275 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2276 return tem;
2278 /* Reassociate floating point addition only when the user
2279 specifies associative math operations. */
2280 if (FLOAT_MODE_P (mode)
2281 && flag_associative_math)
2283 tem = simplify_associative_operation (code, mode, op0, op1);
2284 if (tem)
2285 return tem;
2287 break;
2289 case COMPARE:
2290 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2291 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2292 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2293 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2295 rtx xop00 = XEXP (op0, 0);
2296 rtx xop10 = XEXP (op1, 0);
2298 #ifdef HAVE_cc0
2299 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2300 #else
2301 if (REG_P (xop00) && REG_P (xop10)
2302 && GET_MODE (xop00) == GET_MODE (xop10)
2303 && REGNO (xop00) == REGNO (xop10)
2304 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2305 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2306 #endif
2307 return xop00;
2309 break;
2311 case MINUS:
2312 /* We can't assume x-x is 0 even with non-IEEE floating point,
2313 but since it is zero except in very strange circumstances, we
2314 will treat it as zero with -ffinite-math-only. */
2315 if (rtx_equal_p (trueop0, trueop1)
2316 && ! side_effects_p (op0)
2317 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2318 return CONST0_RTX (mode);
2320 /* Change subtraction from zero into negation. (0 - x) is the
2321 same as -x when x is NaN, infinite, or finite and nonzero.
2322 But if the mode has signed zeros, and does not round towards
2323 -infinity, then 0 - 0 is 0, not -0. */
2324 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2325 return simplify_gen_unary (NEG, mode, op1, mode);
2327 /* (-1 - a) is ~a. */
2328 if (trueop0 == constm1_rtx)
2329 return simplify_gen_unary (NOT, mode, op1, mode);
2331 /* Subtracting 0 has no effect unless the mode has signed zeros
2332 and supports rounding towards -infinity. In such a case,
2333 0 - 0 is -0. */
2334 if (!(HONOR_SIGNED_ZEROS (mode)
2335 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2336 && trueop1 == CONST0_RTX (mode))
2337 return op0;
2339 /* See if this is something like X * C - X or vice versa or
2340 if the multiplication is written as a shift. If so, we can
2341 distribute and make a new multiply, shift, or maybe just
2342 have X (if C is 2 in the example above). But don't make
2343 something more expensive than we had before. */
2345 if (SCALAR_INT_MODE_P (mode))
2347 double_int coeff0, negcoeff1;
2348 rtx lhs = op0, rhs = op1;
2350 coeff0 = double_int_one;
2351 negcoeff1 = double_int_minus_one;
2353 if (GET_CODE (lhs) == NEG)
2355 coeff0 = double_int_minus_one;
2356 lhs = XEXP (lhs, 0);
2358 else if (GET_CODE (lhs) == MULT
2359 && CONST_INT_P (XEXP (lhs, 1)))
2361 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2362 lhs = XEXP (lhs, 0);
2364 else if (GET_CODE (lhs) == ASHIFT
2365 && CONST_INT_P (XEXP (lhs, 1))
2366 && INTVAL (XEXP (lhs, 1)) >= 0
2367 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2369 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2370 lhs = XEXP (lhs, 0);
2373 if (GET_CODE (rhs) == NEG)
2375 negcoeff1 = double_int_one;
2376 rhs = XEXP (rhs, 0);
2378 else if (GET_CODE (rhs) == MULT
2379 && CONST_INT_P (XEXP (rhs, 1)))
2381 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2382 rhs = XEXP (rhs, 0);
2384 else if (GET_CODE (rhs) == ASHIFT
2385 && CONST_INT_P (XEXP (rhs, 1))
2386 && INTVAL (XEXP (rhs, 1)) >= 0
2387 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2389 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2390 negcoeff1 = -negcoeff1;
2391 rhs = XEXP (rhs, 0);
2394 if (rtx_equal_p (lhs, rhs))
2396 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2397 rtx coeff;
2398 double_int val;
2399 bool speed = optimize_function_for_speed_p (cfun);
2401 val = coeff0 + negcoeff1;
2402 coeff = immed_double_int_const (val, mode);
2404 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2405 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2406 ? tem : 0;
2410 /* (a - (-b)) -> (a + b). True even for IEEE. */
2411 if (GET_CODE (op1) == NEG)
2412 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2414 /* (-x - c) may be simplified as (-c - x). */
2415 if (GET_CODE (op0) == NEG
2416 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2418 tem = simplify_unary_operation (NEG, mode, op1, mode);
2419 if (tem)
2420 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2423 /* Don't let a relocatable value get a negative coeff. */
2424 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2425 return simplify_gen_binary (PLUS, mode,
2426 op0,
2427 neg_const_int (mode, op1));
2429 /* (x - (x & y)) -> (x & ~y) */
2430 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2432 if (rtx_equal_p (op0, XEXP (op1, 0)))
2434 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2435 GET_MODE (XEXP (op1, 1)));
2436 return simplify_gen_binary (AND, mode, op0, tem);
2438 if (rtx_equal_p (op0, XEXP (op1, 1)))
2440 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2441 GET_MODE (XEXP (op1, 0)));
2442 return simplify_gen_binary (AND, mode, op0, tem);
2446 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2447 by reversing the comparison code if valid. */
2448 if (STORE_FLAG_VALUE == 1
2449 && trueop0 == const1_rtx
2450 && COMPARISON_P (op1)
2451 && (reversed = reversed_comparison (op1, mode)))
2452 return reversed;
2454 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2455 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2456 && GET_CODE (op1) == MULT
2457 && GET_CODE (XEXP (op1, 0)) == NEG)
2459 rtx in1, in2;
2461 in1 = XEXP (XEXP (op1, 0), 0);
2462 in2 = XEXP (op1, 1);
2463 return simplify_gen_binary (PLUS, mode,
2464 simplify_gen_binary (MULT, mode,
2465 in1, in2),
2466 op0);
2469 /* Canonicalize (minus (neg A) (mult B C)) to
2470 (minus (mult (neg B) C) A). */
2471 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2472 && GET_CODE (op1) == MULT
2473 && GET_CODE (op0) == NEG)
2475 rtx in1, in2;
2477 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2478 in2 = XEXP (op1, 1);
2479 return simplify_gen_binary (MINUS, mode,
2480 simplify_gen_binary (MULT, mode,
2481 in1, in2),
2482 XEXP (op0, 0));
2485 /* If one of the operands is a PLUS or a MINUS, see if we can
2486 simplify this by the associative law. This will, for example,
2487 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2488 Don't use the associative law for floating point.
2489 The inaccuracy makes it nonassociative,
2490 and subtle programs can break if operations are associated. */
2492 if (INTEGRAL_MODE_P (mode)
2493 && (plus_minus_operand_p (op0)
2494 || plus_minus_operand_p (op1))
2495 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2496 return tem;
2497 break;
2499 case MULT:
2500 if (trueop1 == constm1_rtx)
2501 return simplify_gen_unary (NEG, mode, op0, mode);
2503 if (GET_CODE (op0) == NEG)
2505 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2506 /* If op1 is a MULT as well and simplify_unary_operation
2507 just moved the NEG to the second operand, simplify_gen_binary
2508 below could, through simplify_associative_operation, move
2509 the NEG around again and recurse endlessly. */
2510 if (temp
2511 && GET_CODE (op1) == MULT
2512 && GET_CODE (temp) == MULT
2513 && XEXP (op1, 0) == XEXP (temp, 0)
2514 && GET_CODE (XEXP (temp, 1)) == NEG
2515 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2516 temp = NULL_RTX;
2517 if (temp)
2518 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2520 if (GET_CODE (op1) == NEG)
2522 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2523 /* If op0 is a MULT as well and simplify_unary_operation
2524 just moved the NEG to the second operand, simplify_gen_binary
2525 below could, through simplify_associative_operation, move
2526 the NEG around again and recurse endlessly. */
2527 if (temp
2528 && GET_CODE (op0) == MULT
2529 && GET_CODE (temp) == MULT
2530 && XEXP (op0, 0) == XEXP (temp, 0)
2531 && GET_CODE (XEXP (temp, 1)) == NEG
2532 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2533 temp = NULL_RTX;
2534 if (temp)
2535 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2538 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2539 x is NaN, since x * 0 is then also NaN. Nor is it valid
2540 when the mode has signed zeros, since multiplying a negative
2541 number by 0 will give -0, not 0. */
2542 if (!HONOR_NANS (mode)
2543 && !HONOR_SIGNED_ZEROS (mode)
2544 && trueop1 == CONST0_RTX (mode)
2545 && ! side_effects_p (op0))
2546 return op1;
2548 /* In IEEE floating point, x*1 is not equivalent to x for
2549 signalling NaNs. */
2550 if (!HONOR_SNANS (mode)
2551 && trueop1 == CONST1_RTX (mode))
2552 return op0;
2554 /* Convert multiply by constant power of two into shift unless
2555 we are still generating RTL. This test is a kludge. */
2556 if (CONST_INT_P (trueop1)
2557 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2558 /* If the mode is larger than the host word size, and the
2559 uppermost bit is set, then this isn't a power of two due
2560 to implicit sign extension. */
2561 && (width <= HOST_BITS_PER_WIDE_INT
2562 || val != HOST_BITS_PER_WIDE_INT - 1))
2563 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2565 /* Likewise for multipliers wider than a word. */
2566 if (CONST_DOUBLE_AS_INT_P (trueop1)
2567 && GET_MODE (op0) == mode
2568 && CONST_DOUBLE_LOW (trueop1) == 0
2569 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2570 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2571 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2572 return simplify_gen_binary (ASHIFT, mode, op0,
2573 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2575 /* x*2 is x+x and x*(-1) is -x */
2576 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2577 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2578 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2579 && GET_MODE (op0) == mode)
2581 REAL_VALUE_TYPE d;
2582 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2584 if (REAL_VALUES_EQUAL (d, dconst2))
2585 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2587 if (!HONOR_SNANS (mode)
2588 && REAL_VALUES_EQUAL (d, dconstm1))
2589 return simplify_gen_unary (NEG, mode, op0, mode);
2592 /* Optimize -x * -x as x * x. */
2593 if (FLOAT_MODE_P (mode)
2594 && GET_CODE (op0) == NEG
2595 && GET_CODE (op1) == NEG
2596 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2597 && !side_effects_p (XEXP (op0, 0)))
2598 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2600 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2601 if (SCALAR_FLOAT_MODE_P (mode)
2602 && GET_CODE (op0) == ABS
2603 && GET_CODE (op1) == ABS
2604 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2605 && !side_effects_p (XEXP (op0, 0)))
2606 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2608 /* Reassociate multiplication, but for floating point MULTs
2609 only when the user specifies unsafe math optimizations. */
2610 if (! FLOAT_MODE_P (mode)
2611 || flag_unsafe_math_optimizations)
2613 tem = simplify_associative_operation (code, mode, op0, op1);
2614 if (tem)
2615 return tem;
2617 break;
2619 case IOR:
2620 if (trueop1 == CONST0_RTX (mode))
2621 return op0;
2622 if (INTEGRAL_MODE_P (mode)
2623 && trueop1 == CONSTM1_RTX (mode)
2624 && !side_effects_p (op0))
2625 return op1;
2626 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2627 return op0;
2628 /* A | (~A) -> -1 */
2629 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2630 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2631 && ! side_effects_p (op0)
2632 && SCALAR_INT_MODE_P (mode))
2633 return constm1_rtx;
2635 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2636 if (CONST_INT_P (op1)
2637 && HWI_COMPUTABLE_MODE_P (mode)
2638 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2639 && !side_effects_p (op0))
2640 return op1;
2642 /* Canonicalize (X & C1) | C2. */
2643 if (GET_CODE (op0) == AND
2644 && CONST_INT_P (trueop1)
2645 && CONST_INT_P (XEXP (op0, 1)))
2647 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2648 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2649 HOST_WIDE_INT c2 = INTVAL (trueop1);
2651 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2652 if ((c1 & c2) == c1
2653 && !side_effects_p (XEXP (op0, 0)))
2654 return trueop1;
2656 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2657 if (((c1|c2) & mask) == mask)
2658 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2660 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2661 if (((c1 & ~c2) & mask) != (c1 & mask))
2663 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2664 gen_int_mode (c1 & ~c2, mode));
2665 return simplify_gen_binary (IOR, mode, tem, op1);
2669 /* Convert (A & B) | A to A. */
2670 if (GET_CODE (op0) == AND
2671 && (rtx_equal_p (XEXP (op0, 0), op1)
2672 || rtx_equal_p (XEXP (op0, 1), op1))
2673 && ! side_effects_p (XEXP (op0, 0))
2674 && ! side_effects_p (XEXP (op0, 1)))
2675 return op1;
2677 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2678 mode size to (rotate A CX). */
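/* E.g. in SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   becomes (rotate x (const_int 24)), since 24 + 8 equals the mode
   precision.  */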
2680 if (GET_CODE (op1) == ASHIFT
2681 || GET_CODE (op1) == SUBREG)
2683 opleft = op1;
2684 opright = op0;
2686 else
2688 opright = op1;
2689 opleft = op0;
2692 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2693 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2694 && CONST_INT_P (XEXP (opleft, 1))
2695 && CONST_INT_P (XEXP (opright, 1))
2696 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2697 == GET_MODE_PRECISION (mode)))
2698 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2700 /* Same, but for ashift that has been "simplified" to a wider mode
2701 by simplify_shift_const. */
2703 if (GET_CODE (opleft) == SUBREG
2704 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2705 && GET_CODE (opright) == LSHIFTRT
2706 && GET_CODE (XEXP (opright, 0)) == SUBREG
2707 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2708 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2709 && (GET_MODE_SIZE (GET_MODE (opleft))
2710 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2711 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2712 SUBREG_REG (XEXP (opright, 0)))
2713 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2714 && CONST_INT_P (XEXP (opright, 1))
2715 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2716 == GET_MODE_PRECISION (mode)))
2717 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2718 XEXP (SUBREG_REG (opleft), 1));
2720 /* If we have (ior (and X C1) C2), simplify this by making
2721 C1 as small as possible if C1 actually changes. */
2722 if (CONST_INT_P (op1)
2723 && (HWI_COMPUTABLE_MODE_P (mode)
2724 || INTVAL (op1) > 0)
2725 && GET_CODE (op0) == AND
2726 && CONST_INT_P (XEXP (op0, 1))
2727 && CONST_INT_P (op1)
2728 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2729 return simplify_gen_binary (IOR, mode,
2730 simplify_gen_binary
2731 (AND, mode, XEXP (op0, 0),
2732 GEN_INT (UINTVAL (XEXP (op0, 1))
2733 & ~UINTVAL (op1))),
2734 op1);
2736 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2737 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2738 the PLUS does not affect any of the bits in OP1: then we can do
2739 the IOR as a PLUS and we can associate. This is valid if OP1
2740 can be safely shifted left C bits. */
2741 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2742 && GET_CODE (XEXP (op0, 0)) == PLUS
2743 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2744 && CONST_INT_P (XEXP (op0, 1))
2745 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2747 int count = INTVAL (XEXP (op0, 1));
2748 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2750 if (mask >> count == INTVAL (trueop1)
2751 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2752 return simplify_gen_binary (ASHIFTRT, mode,
2753 plus_constant (mode, XEXP (op0, 0),
2754 mask),
2755 XEXP (op0, 1));
2758 tem = simplify_associative_operation (code, mode, op0, op1);
2759 if (tem)
2760 return tem;
2761 break;
2763 case XOR:
2764 if (trueop1 == CONST0_RTX (mode))
2765 return op0;
2766 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2767 return simplify_gen_unary (NOT, mode, op0, mode);
2768 if (rtx_equal_p (trueop0, trueop1)
2769 && ! side_effects_p (op0)
2770 && GET_MODE_CLASS (mode) != MODE_CC)
2771 return CONST0_RTX (mode);
2773 /* Canonicalize XOR of the most significant bit to PLUS. */
2774 if (CONST_SCALAR_INT_P (op1)
2775 && mode_signbit_p (mode, op1))
2776 return simplify_gen_binary (PLUS, mode, op0, op1);
2777 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2778 if (CONST_SCALAR_INT_P (op1)
2779 && GET_CODE (op0) == PLUS
2780 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2781 && mode_signbit_p (mode, XEXP (op0, 1)))
2782 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2783 simplify_gen_binary (XOR, mode, op1,
2784 XEXP (op0, 1)));
2786 /* If we are XORing two things that have no bits in common,
2787 convert them into an IOR. This helps to detect rotation encoded
2788 using those methods and possibly other simplifications. */
2790 if (HWI_COMPUTABLE_MODE_P (mode)
2791 && (nonzero_bits (op0, mode)
2792 & nonzero_bits (op1, mode)) == 0)
2793 return (simplify_gen_binary (IOR, mode, op0, op1));
2795 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2796 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2797 (NOT y). */
2799 int num_negated = 0;
2801 if (GET_CODE (op0) == NOT)
2802 num_negated++, op0 = XEXP (op0, 0);
2803 if (GET_CODE (op1) == NOT)
2804 num_negated++, op1 = XEXP (op1, 0);
2806 if (num_negated == 2)
2807 return simplify_gen_binary (XOR, mode, op0, op1);
2808 else if (num_negated == 1)
2809 return simplify_gen_unary (NOT, mode,
2810 simplify_gen_binary (XOR, mode, op0, op1),
2811 mode);
2814 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2815 correspond to a machine insn or result in further simplifications
2816 if B is a constant. */
2818 if (GET_CODE (op0) == AND
2819 && rtx_equal_p (XEXP (op0, 1), op1)
2820 && ! side_effects_p (op1))
2821 return simplify_gen_binary (AND, mode,
2822 simplify_gen_unary (NOT, mode,
2823 XEXP (op0, 0), mode),
2824 op1);
2826 else if (GET_CODE (op0) == AND
2827 && rtx_equal_p (XEXP (op0, 0), op1)
2828 && ! side_effects_p (op1))
2829 return simplify_gen_binary (AND, mode,
2830 simplify_gen_unary (NOT, mode,
2831 XEXP (op0, 1), mode),
2832 op1);
2834 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2835 we can transform like this:
2836 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2837 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2838 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2839 Attempt a few simplifications when B and C are both constants. */
2840 if (GET_CODE (op0) == AND
2841 && CONST_INT_P (op1)
2842 && CONST_INT_P (XEXP (op0, 1)))
2844 rtx a = XEXP (op0, 0);
2845 rtx b = XEXP (op0, 1);
2846 rtx c = op1;
2847 HOST_WIDE_INT bval = INTVAL (b);
2848 HOST_WIDE_INT cval = INTVAL (c);
2850 rtx na_c
2851 = simplify_binary_operation (AND, mode,
2852 simplify_gen_unary (NOT, mode, a, mode),
2853 c);
2854 if ((~cval & bval) == 0)
2856 /* Try to simplify ~A&C | ~B&C. */
2857 if (na_c != NULL_RTX)
2858 return simplify_gen_binary (IOR, mode, na_c,
2859 GEN_INT (~bval & cval));
2861 else
2863 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2864 if (na_c == const0_rtx)
2866 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2867 GEN_INT (~cval & bval));
2868 return simplify_gen_binary (IOR, mode, a_nc_b,
2869 GEN_INT (~bval & cval));
2874 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2875 comparison if STORE_FLAG_VALUE is 1. */
2876 if (STORE_FLAG_VALUE == 1
2877 && trueop1 == const1_rtx
2878 && COMPARISON_P (op0)
2879 && (reversed = reversed_comparison (op0, mode)))
2880 return reversed;
2882 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2883 is (lt foo (const_int 0)), so we can perform the above
2884 simplification if STORE_FLAG_VALUE is 1. */
2886 if (STORE_FLAG_VALUE == 1
2887 && trueop1 == const1_rtx
2888 && GET_CODE (op0) == LSHIFTRT
2889 && CONST_INT_P (XEXP (op0, 1))
2890 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2891 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2893 /* (xor (comparison foo bar) (const_int sign-bit))
2894 can become the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2895 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2896 && trueop1 == const_true_rtx
2897 && COMPARISON_P (op0)
2898 && (reversed = reversed_comparison (op0, mode)))
2899 return reversed;
2901 tem = simplify_associative_operation (code, mode, op0, op1);
2902 if (tem)
2903 return tem;
2904 break;
2906 case AND:
2907 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2908 return trueop1;
2909 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2910 return op0;
2911 if (HWI_COMPUTABLE_MODE_P (mode))
2913 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2914 HOST_WIDE_INT nzop1;
2915 if (CONST_INT_P (trueop1))
2917 HOST_WIDE_INT val1 = INTVAL (trueop1);
2918 /* If we are turning off bits already known off in OP0, we need
2919 not do an AND. */
2920 if ((nzop0 & ~val1) == 0)
2921 return op0;
2923 nzop1 = nonzero_bits (trueop1, mode);
2924 /* If we are clearing all the nonzero bits, the result is zero. */
2925 if ((nzop1 & nzop0) == 0
2926 && !side_effects_p (op0) && !side_effects_p (op1))
2927 return CONST0_RTX (mode);
2929 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2930 && GET_MODE_CLASS (mode) != MODE_CC)
2931 return op0;
2932 /* A & (~A) -> 0 */
2933 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2934 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2935 && ! side_effects_p (op0)
2936 && GET_MODE_CLASS (mode) != MODE_CC)
2937 return CONST0_RTX (mode);
2939 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2940 there are no nonzero bits of C outside of X's mode. */
2941 if ((GET_CODE (op0) == SIGN_EXTEND
2942 || GET_CODE (op0) == ZERO_EXTEND)
2943 && CONST_INT_P (trueop1)
2944 && HWI_COMPUTABLE_MODE_P (mode)
2945 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2946 & UINTVAL (trueop1)) == 0)
2948 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2949 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2950 gen_int_mode (INTVAL (trueop1),
2951 imode));
2952 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2955 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2956 we might be able to further simplify the AND with X and potentially
2957 remove the truncation altogether. */
2958 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2960 rtx x = XEXP (op0, 0);
2961 enum machine_mode xmode = GET_MODE (x);
2962 tem = simplify_gen_binary (AND, xmode, x,
2963 gen_int_mode (INTVAL (trueop1), xmode));
2964 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2967 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2968 if (GET_CODE (op0) == IOR
2969 && CONST_INT_P (trueop1)
2970 && CONST_INT_P (XEXP (op0, 1)))
2972 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2973 return simplify_gen_binary (IOR, mode,
2974 simplify_gen_binary (AND, mode,
2975 XEXP (op0, 0), op1),
2976 gen_int_mode (tmp, mode));
2979 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2980 insn (and may simplify more). */
2981 if (GET_CODE (op0) == XOR
2982 && rtx_equal_p (XEXP (op0, 0), op1)
2983 && ! side_effects_p (op1))
2984 return simplify_gen_binary (AND, mode,
2985 simplify_gen_unary (NOT, mode,
2986 XEXP (op0, 1), mode),
2987 op1);
2989 if (GET_CODE (op0) == XOR
2990 && rtx_equal_p (XEXP (op0, 1), op1)
2991 && ! side_effects_p (op1))
2992 return simplify_gen_binary (AND, mode,
2993 simplify_gen_unary (NOT, mode,
2994 XEXP (op0, 0), mode),
2995 op1);
2997 /* Similarly for (~(A ^ B)) & A. */
2998 if (GET_CODE (op0) == NOT
2999 && GET_CODE (XEXP (op0, 0)) == XOR
3000 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3001 && ! side_effects_p (op1))
3002 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3004 if (GET_CODE (op0) == NOT
3005 && GET_CODE (XEXP (op0, 0)) == XOR
3006 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3007 && ! side_effects_p (op1))
3008 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3010 /* Convert (A | B) & A to A. */
3011 if (GET_CODE (op0) == IOR
3012 && (rtx_equal_p (XEXP (op0, 0), op1)
3013 || rtx_equal_p (XEXP (op0, 1), op1))
3014 && ! side_effects_p (XEXP (op0, 0))
3015 && ! side_effects_p (XEXP (op0, 1)))
3016 return op1;
3018 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3019 ((A & N) + B) & M -> (A + B) & M
3020 Similarly if (N & M) == 0,
3021 ((A | N) + B) & M -> (A + B) & M
3022 and for - instead of + and/or ^ instead of |.
3023 Also, if (N & M) == 0, then
3024 (A +- N) & M -> A & M. */
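/* For example, with M == 7 and N == 8, ((a | 8) + b) & 7 becomes (a + b) & 7:
   setting bit 3 of the first addend can only influence bits 3 and above of
   the sum, which the mask discards anyway.  */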
3025 if (CONST_INT_P (trueop1)
3026 && HWI_COMPUTABLE_MODE_P (mode)
3027 && ~UINTVAL (trueop1)
3028 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3029 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3031 rtx pmop[2];
3032 int which;
3034 pmop[0] = XEXP (op0, 0);
3035 pmop[1] = XEXP (op0, 1);
3037 if (CONST_INT_P (pmop[1])
3038 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3039 return simplify_gen_binary (AND, mode, pmop[0], op1);
3041 for (which = 0; which < 2; which++)
3043 tem = pmop[which];
3044 switch (GET_CODE (tem))
3046 case AND:
3047 if (CONST_INT_P (XEXP (tem, 1))
3048 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3049 == UINTVAL (trueop1))
3050 pmop[which] = XEXP (tem, 0);
3051 break;
3052 case IOR:
3053 case XOR:
3054 if (CONST_INT_P (XEXP (tem, 1))
3055 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3056 pmop[which] = XEXP (tem, 0);
3057 break;
3058 default:
3059 break;
3063 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3065 tem = simplify_gen_binary (GET_CODE (op0), mode,
3066 pmop[0], pmop[1]);
3067 return simplify_gen_binary (code, mode, tem, op1);
3071 /* (and X (ior (not X) Y)) -> (and X Y) */
3072 if (GET_CODE (op1) == IOR
3073 && GET_CODE (XEXP (op1, 0)) == NOT
3074 && op0 == XEXP (XEXP (op1, 0), 0))
3075 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3077 /* (and (ior (not X) Y) X) -> (and X Y) */
3078 if (GET_CODE (op0) == IOR
3079 && GET_CODE (XEXP (op0, 0)) == NOT
3080 && op1 == XEXP (XEXP (op0, 0), 0))
3081 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3083 tem = simplify_associative_operation (code, mode, op0, op1);
3084 if (tem)
3085 return tem;
3086 break;
3088 case UDIV:
3089 /* 0/x is 0 (or x&0 if x has side-effects). */
3090 if (trueop0 == CONST0_RTX (mode))
3092 if (side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode, op1, trueop0);
3094 return trueop0;
3096 /* x/1 is x. */
3097 if (trueop1 == CONST1_RTX (mode))
3098 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3099 /* Convert divide by power of two into shift. */
3100 if (CONST_INT_P (trueop1)
3101 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3102 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3103 break;
3105 case DIV:
3106 /* Handle floating point and integers separately. */
3107 if (SCALAR_FLOAT_MODE_P (mode))
3109 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3110 safe for modes with NaNs, since 0.0 / 0.0 will then be
3111 NaN rather than 0.0. Nor is it safe for modes with signed
3112 zeros, since dividing 0 by a negative number gives -0.0. */
3113 if (trueop0 == CONST0_RTX (mode)
3114 && !HONOR_NANS (mode)
3115 && !HONOR_SIGNED_ZEROS (mode)
3116 && ! side_effects_p (op1))
3117 return op0;
3118 /* x/1.0 is x. */
3119 if (trueop1 == CONST1_RTX (mode)
3120 && !HONOR_SNANS (mode))
3121 return op0;
3123 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3124 && trueop1 != CONST0_RTX (mode))
3126 REAL_VALUE_TYPE d;
3127 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3129 /* x/-1.0 is -x. */
3130 if (REAL_VALUES_EQUAL (d, dconstm1)
3131 && !HONOR_SNANS (mode))
3132 return simplify_gen_unary (NEG, mode, op0, mode);
3134 /* Change FP division by a constant into multiplication.
3135 Only do this with -freciprocal-math. */
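/* E.g. (div x (const_double 4.0)) becomes (mult x (const_double 0.25))
   when -freciprocal-math is in effect.  */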
3136 if (flag_reciprocal_math
3137 && !REAL_VALUES_EQUAL (d, dconst0))
3139 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3140 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3141 return simplify_gen_binary (MULT, mode, op0, tem);
3145 else if (SCALAR_INT_MODE_P (mode))
3147 /* 0/x is 0 (or x&0 if x has side-effects). */
3148 if (trueop0 == CONST0_RTX (mode)
3149 && !cfun->can_throw_non_call_exceptions)
3151 if (side_effects_p (op1))
3152 return simplify_gen_binary (AND, mode, op1, trueop0);
3153 return trueop0;
3155 /* x/1 is x. */
3156 if (trueop1 == CONST1_RTX (mode))
3157 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3158 /* x/-1 is -x. */
3159 if (trueop1 == constm1_rtx)
3161 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3162 return simplify_gen_unary (NEG, mode, x, mode);
3165 break;
3167 case UMOD:
3168 /* 0%x is 0 (or x&0 if x has side-effects). */
3169 if (trueop0 == CONST0_RTX (mode))
3171 if (side_effects_p (op1))
3172 return simplify_gen_binary (AND, mode, op1, trueop0);
3173 return trueop0;
3175 /* x%1 is 0 (or x&0 if x has side-effects). */
3176 if (trueop1 == CONST1_RTX (mode))
3178 if (side_effects_p (op0))
3179 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3180 return CONST0_RTX (mode);
3182 /* Implement modulus by power of two as AND. */
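/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */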
3183 if (CONST_INT_P (trueop1)
3184 && exact_log2 (UINTVAL (trueop1)) > 0)
3185 return simplify_gen_binary (AND, mode, op0,
3186 GEN_INT (INTVAL (op1) - 1));
3187 break;
3189 case MOD:
3190 /* 0%x is 0 (or x&0 if x has side-effects). */
3191 if (trueop0 == CONST0_RTX (mode))
3193 if (side_effects_p (op1))
3194 return simplify_gen_binary (AND, mode, op1, trueop0);
3195 return trueop0;
3197 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3198 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3200 if (side_effects_p (op0))
3201 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3202 return CONST0_RTX (mode);
3204 break;
3206 case ROTATERT:
3207 case ROTATE:
3208 case ASHIFTRT:
3209 if (trueop1 == CONST0_RTX (mode))
3210 return op0;
3211 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3212 return op0;
3213 /* Rotating ~0 always results in ~0. */
3214 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3215 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3216 && ! side_effects_p (op1))
3217 return op0;
3218 canonicalize_shift:
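/* When the target truncates shift counts (SHIFT_COUNT_TRUNCATED), reduce
   the count modulo the mode size below, e.g. an SImode shift by 33 is
   canonicalized to a shift by 1.  */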
3219 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3221 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3222 if (val != INTVAL (op1))
3223 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3225 break;
3227 case ASHIFT:
3228 case SS_ASHIFT:
3229 case US_ASHIFT:
3230 if (trueop1 == CONST0_RTX (mode))
3231 return op0;
3232 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3233 return op0;
3234 goto canonicalize_shift;
3236 case LSHIFTRT:
3237 if (trueop1 == CONST0_RTX (mode))
3238 return op0;
3239 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3240 return op0;
3241 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
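/* The transformation applies only when CLZ_DEFINED_VALUE_AT_ZERO is the
   mode precision and C is its exact log2: for X != 0 the CLZ result is
   below 1 << C and shifts down to 0, while X == 0 shifts to exactly 1.  */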
3242 if (GET_CODE (op0) == CLZ
3243 && CONST_INT_P (trueop1)
3244 && STORE_FLAG_VALUE == 1
3245 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3247 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3248 unsigned HOST_WIDE_INT zero_val = 0;
3250 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3251 && zero_val == GET_MODE_PRECISION (imode)
3252 && INTVAL (trueop1) == exact_log2 (zero_val))
3253 return simplify_gen_relational (EQ, mode, imode,
3254 XEXP (op0, 0), const0_rtx);
3256 goto canonicalize_shift;
3258 case SMIN:
3259 if (width <= HOST_BITS_PER_WIDE_INT
3260 && mode_signbit_p (mode, trueop1)
3261 && ! side_effects_p (op0))
3262 return op1;
3263 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3264 return op0;
3265 tem = simplify_associative_operation (code, mode, op0, op1);
3266 if (tem)
3267 return tem;
3268 break;
3270 case SMAX:
3271 if (width <= HOST_BITS_PER_WIDE_INT
3272 && CONST_INT_P (trueop1)
3273 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3274 && ! side_effects_p (op0))
3275 return op1;
3276 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3277 return op0;
3278 tem = simplify_associative_operation (code, mode, op0, op1);
3279 if (tem)
3280 return tem;
3281 break;
3283 case UMIN:
3284 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3285 return op1;
3286 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3287 return op0;
3288 tem = simplify_associative_operation (code, mode, op0, op1);
3289 if (tem)
3290 return tem;
3291 break;
3293 case UMAX:
3294 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3295 return op1;
3296 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3297 return op0;
3298 tem = simplify_associative_operation (code, mode, op0, op1);
3299 if (tem)
3300 return tem;
3301 break;
3303 case SS_PLUS:
3304 case US_PLUS:
3305 case SS_MINUS:
3306 case US_MINUS:
3307 case SS_MULT:
3308 case US_MULT:
3309 case SS_DIV:
3310 case US_DIV:
3311 /* ??? There are simplifications that can be done. */
3312 return 0;
3314 case VEC_SELECT:
3315 if (!VECTOR_MODE_P (mode))
3317 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3318 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3319 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3320 gcc_assert (XVECLEN (trueop1, 0) == 1);
3321 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3323 if (GET_CODE (trueop0) == CONST_VECTOR)
3324 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3325 (trueop1, 0, 0)));
3327 /* Extract a scalar element from a nested VEC_SELECT expression
3328 (with optional nested VEC_CONCAT expression). Some targets
3329 (i386) extract scalar element from a vector using chain of
3330 nested VEC_SELECT expressions. When input operand is a memory
3331 operand, this operation can be simplified to a simple scalar
3332 load from an offset memory address. */
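/* E.g. (vec_select (vec_select (mem ...) (parallel [2 3])) (parallel [1]))
   is narrowed below to a single (vec_select (mem ...) (parallel [3])),
   which the target can then match as one scalar load.  */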
3333 if (GET_CODE (trueop0) == VEC_SELECT)
3335 rtx op0 = XEXP (trueop0, 0);
3336 rtx op1 = XEXP (trueop0, 1);
3338 enum machine_mode opmode = GET_MODE (op0);
3339 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3340 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3342 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3343 int elem;
3345 rtvec vec;
3346 rtx tmp_op, tmp;
3348 gcc_assert (GET_CODE (op1) == PARALLEL);
3349 gcc_assert (i < n_elts);
3351 /* Select the element pointed to by the nested selector. */
3352 elem = INTVAL (XVECEXP (op1, 0, i));
3354 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3355 if (GET_CODE (op0) == VEC_CONCAT)
3357 rtx op00 = XEXP (op0, 0);
3358 rtx op01 = XEXP (op0, 1);
3360 enum machine_mode mode00, mode01;
3361 int n_elts00, n_elts01;
3363 mode00 = GET_MODE (op00);
3364 mode01 = GET_MODE (op01);
3366 /* Find out number of elements of each operand. */
3367 if (VECTOR_MODE_P (mode00))
3369 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3370 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3372 else
3373 n_elts00 = 1;
3375 if (VECTOR_MODE_P (mode01))
3377 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3378 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3380 else
3381 n_elts01 = 1;
3383 gcc_assert (n_elts == n_elts00 + n_elts01);
3385 /* Select correct operand of VEC_CONCAT
3386 and adjust selector. */
3387 if (elem < n_elts01)
3388 tmp_op = op00;
3389 else
3391 tmp_op = op01;
3392 elem -= n_elts00;
3395 else
3396 tmp_op = op0;
3398 vec = rtvec_alloc (1);
3399 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3401 tmp = gen_rtx_fmt_ee (code, mode,
3402 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3403 return tmp;
3405 if (GET_CODE (trueop0) == VEC_DUPLICATE
3406 && GET_MODE (XEXP (trueop0, 0)) == mode)
3407 return XEXP (trueop0, 0);
3409 else
3411 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3412 gcc_assert (GET_MODE_INNER (mode)
3413 == GET_MODE_INNER (GET_MODE (trueop0)));
3414 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3416 if (GET_CODE (trueop0) == CONST_VECTOR)
3418 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3419 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3420 rtvec v = rtvec_alloc (n_elts);
3421 unsigned int i;
3423 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3424 for (i = 0; i < n_elts; i++)
3426 rtx x = XVECEXP (trueop1, 0, i);
3428 gcc_assert (CONST_INT_P (x));
3429 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3430 INTVAL (x));
3433 return gen_rtx_CONST_VECTOR (mode, v);
3436 /* Recognize the identity. */
3437 if (GET_MODE (trueop0) == mode)
3439 bool maybe_ident = true;
3440 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3442 rtx j = XVECEXP (trueop1, 0, i);
3443 if (!CONST_INT_P (j) || INTVAL (j) != i)
3445 maybe_ident = false;
3446 break;
3449 if (maybe_ident)
3450 return trueop0;
3453 /* If we build {a,b} then permute it, build the result directly. */
3454 if (XVECLEN (trueop1, 0) == 2
3455 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3456 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3457 && GET_CODE (trueop0) == VEC_CONCAT
3458 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3459 && GET_MODE (XEXP (trueop0, 0)) == mode
3460 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3461 && GET_MODE (XEXP (trueop0, 1)) == mode)
3463 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3464 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3465 rtx subop0, subop1;
3467 gcc_assert (i0 < 4 && i1 < 4);
3468 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3469 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3471 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3474 if (XVECLEN (trueop1, 0) == 2
3475 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3476 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3477 && GET_CODE (trueop0) == VEC_CONCAT
3478 && GET_MODE (trueop0) == mode)
3480 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3481 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3482 rtx subop0, subop1;
3484 gcc_assert (i0 < 2 && i1 < 2);
3485 subop0 = XEXP (trueop0, i0);
3486 subop1 = XEXP (trueop0, i1);
3488 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3492 if (XVECLEN (trueop1, 0) == 1
3493 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3494 && GET_CODE (trueop0) == VEC_CONCAT)
3496 rtx vec = trueop0;
3497 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3499 /* Try to find the element in the VEC_CONCAT. */
3500 while (GET_MODE (vec) != mode
3501 && GET_CODE (vec) == VEC_CONCAT)
3503 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3504 if (offset < vec_size)
3505 vec = XEXP (vec, 0);
3506 else
3508 offset -= vec_size;
3509 vec = XEXP (vec, 1);
3511 vec = avoid_constant_pool_reference (vec);
3514 if (GET_MODE (vec) == mode)
3515 return vec;
3518 return 0;
3519 case VEC_CONCAT:
3521 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3522 ? GET_MODE (trueop0)
3523 : GET_MODE_INNER (mode));
3524 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3525 ? GET_MODE (trueop1)
3526 : GET_MODE_INNER (mode));
3528 gcc_assert (VECTOR_MODE_P (mode));
3529 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3530 == GET_MODE_SIZE (mode));
3532 if (VECTOR_MODE_P (op0_mode))
3533 gcc_assert (GET_MODE_INNER (mode)
3534 == GET_MODE_INNER (op0_mode));
3535 else
3536 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3538 if (VECTOR_MODE_P (op1_mode))
3539 gcc_assert (GET_MODE_INNER (mode)
3540 == GET_MODE_INNER (op1_mode));
3541 else
3542 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3544 if ((GET_CODE (trueop0) == CONST_VECTOR
3545 || CONST_SCALAR_INT_P (trueop0)
3546 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3547 && (GET_CODE (trueop1) == CONST_VECTOR
3548 || CONST_SCALAR_INT_P (trueop1)
3549 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3551 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3552 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3553 rtvec v = rtvec_alloc (n_elts);
3554 unsigned int i;
3555 unsigned in_n_elts = 1;
3557 if (VECTOR_MODE_P (op0_mode))
3558 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3559 for (i = 0; i < n_elts; i++)
3561 if (i < in_n_elts)
3563 if (!VECTOR_MODE_P (op0_mode))
3564 RTVEC_ELT (v, i) = trueop0;
3565 else
3566 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3568 else
3570 if (!VECTOR_MODE_P (op1_mode))
3571 RTVEC_ELT (v, i) = trueop1;
3572 else
3573 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3574 i - in_n_elts);
3578 return gen_rtx_CONST_VECTOR (mode, v);
3581 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3582 if (GET_CODE (trueop0) == VEC_SELECT
3583 && GET_CODE (trueop1) == VEC_SELECT
3584 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3586 rtx par0 = XEXP (trueop0, 1);
3587 rtx par1 = XEXP (trueop1, 1);
3588 int len0 = XVECLEN (par0, 0);
3589 int len1 = XVECLEN (par1, 0);
3590 rtvec vec = rtvec_alloc (len0 + len1);
3591 for (int i = 0; i < len0; i++)
3592 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3593 for (int i = 0; i < len1; i++)
3594 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3595 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3596 gen_rtx_PARALLEL (VOIDmode, vec));
3599 return 0;
3601 default:
3602 gcc_unreachable ();
3605 return 0;
3608 rtx
3609 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3610 rtx op0, rtx op1)
3612 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3613 HOST_WIDE_INT val;
3614 unsigned int width = GET_MODE_PRECISION (mode);
3616 if (VECTOR_MODE_P (mode)
3617 && code != VEC_CONCAT
3618 && GET_CODE (op0) == CONST_VECTOR
3619 && GET_CODE (op1) == CONST_VECTOR)
3621 unsigned n_elts = GET_MODE_NUNITS (mode);
3622 enum machine_mode op0mode = GET_MODE (op0);
3623 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3624 enum machine_mode op1mode = GET_MODE (op1);
3625 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3626 rtvec v = rtvec_alloc (n_elts);
3627 unsigned int i;
3629 gcc_assert (op0_n_elts == n_elts);
3630 gcc_assert (op1_n_elts == n_elts);
3631 for (i = 0; i < n_elts; i++)
3633 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3634 CONST_VECTOR_ELT (op0, i),
3635 CONST_VECTOR_ELT (op1, i));
3636 if (!x)
3637 return 0;
3638 RTVEC_ELT (v, i) = x;
3641 return gen_rtx_CONST_VECTOR (mode, v);
3644 if (VECTOR_MODE_P (mode)
3645 && code == VEC_CONCAT
3646 && (CONST_SCALAR_INT_P (op0)
3647 || GET_CODE (op0) == CONST_FIXED
3648 || CONST_DOUBLE_AS_FLOAT_P (op0))
3649 && (CONST_SCALAR_INT_P (op1)
3650 || CONST_DOUBLE_AS_FLOAT_P (op1)
3651 || GET_CODE (op1) == CONST_FIXED))
3653 unsigned n_elts = GET_MODE_NUNITS (mode);
3654 rtvec v = rtvec_alloc (n_elts);
3656 gcc_assert (n_elts >= 2);
3657 if (n_elts == 2)
3659 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3660 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3662 RTVEC_ELT (v, 0) = op0;
3663 RTVEC_ELT (v, 1) = op1;
3665 else
3667 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3668 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3669 unsigned i;
3671 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3672 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3673 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3675 for (i = 0; i < op0_n_elts; ++i)
3676 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3677 for (i = 0; i < op1_n_elts; ++i)
3678 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3681 return gen_rtx_CONST_VECTOR (mode, v);
3684 if (SCALAR_FLOAT_MODE_P (mode)
3685 && CONST_DOUBLE_AS_FLOAT_P (op0)
3686 && CONST_DOUBLE_AS_FLOAT_P (op1)
3687 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3689 if (code == AND
3690 || code == IOR
3691 || code == XOR)
3693 long tmp0[4];
3694 long tmp1[4];
3695 REAL_VALUE_TYPE r;
3696 int i;
3698 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3699 GET_MODE (op0));
3700 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3701 GET_MODE (op1));
3702 for (i = 0; i < 4; i++)
3704 switch (code)
3706 case AND:
3707 tmp0[i] &= tmp1[i];
3708 break;
3709 case IOR:
3710 tmp0[i] |= tmp1[i];
3711 break;
3712 case XOR:
3713 tmp0[i] ^= tmp1[i];
3714 break;
3715 default:
3716 gcc_unreachable ();
3719 real_from_target (&r, tmp0, mode);
3720 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3722 else
3724 REAL_VALUE_TYPE f0, f1, value, result;
3725 bool inexact;
3727 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3728 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3729 real_convert (&f0, mode, &f0);
3730 real_convert (&f1, mode, &f1);
3732 if (HONOR_SNANS (mode)
3733 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3734 return 0;
3736 if (code == DIV
3737 && REAL_VALUES_EQUAL (f1, dconst0)
3738 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3739 return 0;
3741 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3742 && flag_trapping_math
3743 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3745 int s0 = REAL_VALUE_NEGATIVE (f0);
3746 int s1 = REAL_VALUE_NEGATIVE (f1);
3748 switch (code)
3750 case PLUS:
3751 /* Inf + -Inf = NaN plus exception. */
3752 if (s0 != s1)
3753 return 0;
3754 break;
3755 case MINUS:
3756 /* Inf - Inf = NaN plus exception. */
3757 if (s0 == s1)
3758 return 0;
3759 break;
3760 case DIV:
3761 /* Inf / Inf = NaN plus exception. */
3762 return 0;
3763 default:
3764 break;
3768 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3769 && flag_trapping_math
3770 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3771 || (REAL_VALUE_ISINF (f1)
3772 && REAL_VALUES_EQUAL (f0, dconst0))))
3773 /* Inf * 0 = NaN plus exception. */
3774 return 0;
3776 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3777 &f0, &f1);
3778 real_convert (&result, mode, &value);
3780 /* Don't constant fold this floating point operation if
3781 the result has overflowed and flag_trapping_math is set. */
3783 if (flag_trapping_math
3784 && MODE_HAS_INFINITIES (mode)
3785 && REAL_VALUE_ISINF (result)
3786 && !REAL_VALUE_ISINF (f0)
3787 && !REAL_VALUE_ISINF (f1))
3788 /* Overflow plus exception. */
3789 return 0;
3791 /* Don't constant fold this floating point operation if the
3792 result may depend upon the run-time rounding mode and
3793 flag_rounding_math is set, or if GCC's software emulation
3794 is unable to accurately represent the result. */
3796 if ((flag_rounding_math
3797 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3798 && (inexact || !real_identical (&result, &value)))
3799 return NULL_RTX;
3801 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3805 /* We can fold some multi-word operations. */
3806 if (GET_MODE_CLASS (mode) == MODE_INT
3807 && width == HOST_BITS_PER_DOUBLE_INT
3808 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3809 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3811 double_int o0, o1, res, tmp;
3812 bool overflow;
3814 o0 = rtx_to_double_int (op0);
3815 o1 = rtx_to_double_int (op1);
3817 switch (code)
3819 case MINUS:
3820 /* A - B == A + (-B). */
3821 o1 = -o1;
3823 /* Fall through.... */
3825 case PLUS:
3826 res = o0 + o1;
3827 break;
3829 case MULT:
3830 res = o0 * o1;
3831 break;
3833 case DIV:
3834 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3835 &tmp, &overflow);
3836 if (overflow)
3837 return 0;
3838 break;
3840 case MOD:
3841 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3842 &res, &overflow);
3843 if (overflow)
3844 return 0;
3845 break;
3847 case UDIV:
3848 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3849 &tmp, &overflow);
3850 if (overflow)
3851 return 0;
3852 break;
3854 case UMOD:
3855 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3856 &res, &overflow);
3857 if (overflow)
3858 return 0;
3859 break;
3861 case AND:
3862 res = o0 & o1;
3863 break;
3865 case IOR:
3866 res = o0 | o1;
3867 break;
3869 case XOR:
3870 res = o0 ^ o1;
3871 break;
3873 case SMIN:
3874 res = o0.smin (o1);
3875 break;
3877 case SMAX:
3878 res = o0.smax (o1);
3879 break;
3881 case UMIN:
3882 res = o0.umin (o1);
3883 break;
3885 case UMAX:
3886 res = o0.umax (o1);
3887 break;
3889 case LSHIFTRT: case ASHIFTRT:
3890 case ASHIFT:
3891 case ROTATE: case ROTATERT:
3893 unsigned HOST_WIDE_INT cnt;
3895 if (SHIFT_COUNT_TRUNCATED)
3897 o1.high = 0;
3898 o1.low &= GET_MODE_PRECISION (mode) - 1;
3901 if (!o1.fits_uhwi ()
3902 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3903 return 0;
3905 cnt = o1.to_uhwi ();
3906 unsigned short prec = GET_MODE_PRECISION (mode);
3908 if (code == LSHIFTRT || code == ASHIFTRT)
3909 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3910 else if (code == ASHIFT)
3911 res = o0.alshift (cnt, prec);
3912 else if (code == ROTATE)
3913 res = o0.lrotate (cnt, prec);
3914 else /* code == ROTATERT */
3915 res = o0.rrotate (cnt, prec);
3917 break;
3919 default:
3920 return 0;
3923 return immed_double_int_const (res, mode);
3926 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3927 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3929 /* Get the integer argument values in two forms:
3930 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3932 arg0 = INTVAL (op0);
3933 arg1 = INTVAL (op1);
3935 if (width < HOST_BITS_PER_WIDE_INT)
3937 arg0 &= GET_MODE_MASK (mode);
3938 arg1 &= GET_MODE_MASK (mode);
3940 arg0s = arg0;
3941 if (val_signbit_known_set_p (mode, arg0s))
3942 arg0s |= ~GET_MODE_MASK (mode);
3944 arg1s = arg1;
3945 if (val_signbit_known_set_p (mode, arg1s))
3946 arg1s |= ~GET_MODE_MASK (mode);
3948 else
3950 arg0s = arg0;
3951 arg1s = arg1;
3954 /* Compute the value of the arithmetic. */
3956 switch (code)
3958 case PLUS:
3959 val = arg0s + arg1s;
3960 break;
3962 case MINUS:
3963 val = arg0s - arg1s;
3964 break;
3966 case MULT:
3967 val = arg0s * arg1s;
3968 break;
3970 case DIV:
3971 if (arg1s == 0
3972 || ((unsigned HOST_WIDE_INT) arg0s
3973 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3974 && arg1s == -1))
3975 return 0;
3976 val = arg0s / arg1s;
3977 break;
3979 case MOD:
3980 if (arg1s == 0
3981 || ((unsigned HOST_WIDE_INT) arg0s
3982 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3983 && arg1s == -1))
3984 return 0;
3985 val = arg0s % arg1s;
3986 break;
3988 case UDIV:
3989 if (arg1 == 0
3990 || ((unsigned HOST_WIDE_INT) arg0s
3991 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3992 && arg1s == -1))
3993 return 0;
3994 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3995 break;
3997 case UMOD:
3998 if (arg1 == 0
3999 || ((unsigned HOST_WIDE_INT) arg0s
4000 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4001 && arg1s == -1))
4002 return 0;
4003 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4004 break;
4006 case AND:
4007 val = arg0 & arg1;
4008 break;
4010 case IOR:
4011 val = arg0 | arg1;
4012 break;
4014 case XOR:
4015 val = arg0 ^ arg1;
4016 break;
4018 case LSHIFTRT:
4019 case ASHIFT:
4020 case ASHIFTRT:
4021 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4022 the value is in range. We can't return any old value for
4023 out-of-range arguments because either the middle-end (via
4024 shift_truncation_mask) or the back-end might be relying on
4025 target-specific knowledge. Nor can we rely on
4026 shift_truncation_mask, since the shift might not be part of an
4027 ashlM3, lshrM3 or ashrM3 instruction. */
4028 if (SHIFT_COUNT_TRUNCATED)
4029 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4030 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4031 return 0;
4033 val = (code == ASHIFT
4034 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4035 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4037 /* Sign-extend the result for arithmetic right shifts. */
4038 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4039 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4040 break;
4042 case ROTATERT:
4043 if (arg1 < 0)
4044 return 0;
4046 arg1 %= width;
4047 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4048 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4049 break;
4051 case ROTATE:
4052 if (arg1 < 0)
4053 return 0;
4055 arg1 %= width;
4056 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4057 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4058 break;
4060 case COMPARE:
4061 /* Do nothing here. */
4062 return 0;
4064 case SMIN:
4065 val = arg0s <= arg1s ? arg0s : arg1s;
4066 break;
4068 case UMIN:
4069 val = ((unsigned HOST_WIDE_INT) arg0
4070 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4071 break;
4073 case SMAX:
4074 val = arg0s > arg1s ? arg0s : arg1s;
4075 break;
4077 case UMAX:
4078 val = ((unsigned HOST_WIDE_INT) arg0
4079 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4080 break;
4082 case SS_PLUS:
4083 case US_PLUS:
4084 case SS_MINUS:
4085 case US_MINUS:
4086 case SS_MULT:
4087 case US_MULT:
4088 case SS_DIV:
4089 case US_DIV:
4090 case SS_ASHIFT:
4091 case US_ASHIFT:
4092 /* ??? There are simplifications that can be done. */
4093 return 0;
4095 default:
4096 gcc_unreachable ();
4099 return gen_int_mode (val, mode);
4102 return NULL_RTX;
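/* Illustrative sketch, not part of the original source: folding a PLUS of
   two SImode constants through simplify_const_binary_operation defined
   above.  The wrapper name is hypothetical; only the call it makes is
   taken from this file.  */
static rtx
example_fold_const_plus (HOST_WIDE_INT a, HOST_WIDE_INT b)
{
  rtx op0 = gen_int_mode (a, SImode);
  rtx op1 = gen_int_mode (b, SImode);
  /* Both operands are CONST_INTs, so the width <= HOST_BITS_PER_WIDE_INT
     path above computes the sum and truncates it to SImode,
     e.g. 6 + 7 => (const_int 13).  */
  return simplify_const_binary_operation (PLUS, SImode, op0, op1);
}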
4107 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4108 PLUS or MINUS.
4110 Rather than test for specific cases, we do this by a brute-force method
4111 and do all possible simplifications until no more changes occur. Then
4112 we rebuild the operation. */
4114 struct simplify_plus_minus_op_data
4116 rtx op;
4117 short neg;
4120 static bool
4121 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4123 int result;
4125 result = (commutative_operand_precedence (y)
4126 - commutative_operand_precedence (x));
4127 if (result)
4128 return result > 0;
4130 /* Group together equal REGs to do more simplification. */
4131 if (REG_P (x) && REG_P (y))
4132 return REGNO (x) > REGNO (y);
4133 else
4134 return false;
4137 static rtx
4138 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4139 rtx op1)
4141 struct simplify_plus_minus_op_data ops[8];
4142 rtx result, tem;
4143 int n_ops = 2, input_ops = 2;
4144 int changed, n_constants = 0, canonicalized = 0;
4145 int i, j;
4147 memset (ops, 0, sizeof ops);
4149 /* Set up the two operands and then expand them until nothing has been
4150 changed. If we run out of room in our array, give up; this should
4151 almost never happen. */
4153 ops[0].op = op0;
4154 ops[0].neg = 0;
4155 ops[1].op = op1;
4156 ops[1].neg = (code == MINUS);
4160 changed = 0;
4162 for (i = 0; i < n_ops; i++)
4164 rtx this_op = ops[i].op;
4165 int this_neg = ops[i].neg;
4166 enum rtx_code this_code = GET_CODE (this_op);
4168 switch (this_code)
4170 case PLUS:
4171 case MINUS:
4172 if (n_ops == 7)
4173 return NULL_RTX;
4175 ops[n_ops].op = XEXP (this_op, 1);
4176 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4177 n_ops++;
4179 ops[i].op = XEXP (this_op, 0);
4180 input_ops++;
4181 changed = 1;
4182 canonicalized |= this_neg;
4183 break;
4185 case NEG:
4186 ops[i].op = XEXP (this_op, 0);
4187 ops[i].neg = ! this_neg;
4188 changed = 1;
4189 canonicalized = 1;
4190 break;
4192 case CONST:
4193 if (n_ops < 7
4194 && GET_CODE (XEXP (this_op, 0)) == PLUS
4195 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4196 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4198 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4199 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4200 ops[n_ops].neg = this_neg;
4201 n_ops++;
4202 changed = 1;
4203 canonicalized = 1;
4205 break;
4207 case NOT:
4208 /* ~a -> (-a - 1) */
4209 if (n_ops != 7)
4211 ops[n_ops].op = CONSTM1_RTX (mode);
4212 ops[n_ops++].neg = this_neg;
4213 ops[i].op = XEXP (this_op, 0);
4214 ops[i].neg = !this_neg;
4215 changed = 1;
4216 canonicalized = 1;
4218 break;
4220 case CONST_INT:
4221 n_constants++;
4222 if (this_neg)
4224 ops[i].op = neg_const_int (mode, this_op);
4225 ops[i].neg = 0;
4226 changed = 1;
4227 canonicalized = 1;
4229 break;
4231 default:
4232 break;
4236 while (changed);
4238 if (n_constants > 1)
4239 canonicalized = 1;
4241 gcc_assert (n_ops >= 2);
4243 /* If we only have two operands, we can avoid the loops. */
4244 if (n_ops == 2)
4246 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4247 rtx lhs, rhs;
4249 /* Get the two operands. Be careful with the order, especially for
4250 the cases where code == MINUS. */
4251 if (ops[0].neg && ops[1].neg)
4253 lhs = gen_rtx_NEG (mode, ops[0].op);
4254 rhs = ops[1].op;
4256 else if (ops[0].neg)
4258 lhs = ops[1].op;
4259 rhs = ops[0].op;
4261 else
4263 lhs = ops[0].op;
4264 rhs = ops[1].op;
4267 return simplify_const_binary_operation (code, mode, lhs, rhs);
4270 /* Now simplify each pair of operands until nothing changes. */
4273 /* Insertion sort is good enough for an eight-element array. */
4274 for (i = 1; i < n_ops; i++)
4276 struct simplify_plus_minus_op_data save;
4277 j = i - 1;
4278 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4279 continue;
4281 canonicalized = 1;
4282 save = ops[i];
4284 ops[j + 1] = ops[j];
4285 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4286 ops[j + 1] = save;
4289 changed = 0;
4290 for (i = n_ops - 1; i > 0; i--)
4291 for (j = i - 1; j >= 0; j--)
4293 rtx lhs = ops[j].op, rhs = ops[i].op;
4294 int lneg = ops[j].neg, rneg = ops[i].neg;
4296 if (lhs != 0 && rhs != 0)
4298 enum rtx_code ncode = PLUS;
4300 if (lneg != rneg)
4302 ncode = MINUS;
4303 if (lneg)
4304 tem = lhs, lhs = rhs, rhs = tem;
4306 else if (swap_commutative_operands_p (lhs, rhs))
4307 tem = lhs, lhs = rhs, rhs = tem;
4309 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4310 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4312 rtx tem_lhs, tem_rhs;
4314 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4315 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4316 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4318 if (tem && !CONSTANT_P (tem))
4319 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4321 else
4322 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4324 /* Reject "simplifications" that just wrap the two
4325 arguments in a CONST. Failure to do so can result
4326 in infinite recursion with simplify_binary_operation
4327 when it calls us to simplify CONST operations. */
4328 if (tem
4329 && ! (GET_CODE (tem) == CONST
4330 && GET_CODE (XEXP (tem, 0)) == ncode
4331 && XEXP (XEXP (tem, 0), 0) == lhs
4332 && XEXP (XEXP (tem, 0), 1) == rhs))
4334 lneg &= rneg;
4335 if (GET_CODE (tem) == NEG)
4336 tem = XEXP (tem, 0), lneg = !lneg;
4337 if (CONST_INT_P (tem) && lneg)
4338 tem = neg_const_int (mode, tem), lneg = 0;
4340 ops[i].op = tem;
4341 ops[i].neg = lneg;
4342 ops[j].op = NULL_RTX;
4343 changed = 1;
4344 canonicalized = 1;
4349 /* If nothing changed, fail. */
4350 if (!canonicalized)
4351 return NULL_RTX;
4353 /* Pack all the operands to the lower-numbered entries. */
4354 for (i = 0, j = 0; j < n_ops; j++)
4355 if (ops[j].op)
4357 ops[i] = ops[j];
4358 i++;
4360 n_ops = i;
4362 while (changed);
4364 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4365 if (n_ops == 2
4366 && CONST_INT_P (ops[1].op)
4367 && CONSTANT_P (ops[0].op)
4368 && ops[0].neg)
4369 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4371 /* We suppressed creation of trivial CONST expressions in the
4372 combination loop to avoid recursion. Create one manually now.
4373 The combination loop should have ensured that there is exactly
4374 one CONST_INT, and the sort will have ensured that it is last
4375 in the array and that any other constant will be next-to-last. */
4377 if (n_ops > 1
4378 && CONST_INT_P (ops[n_ops - 1].op)
4379 && CONSTANT_P (ops[n_ops - 2].op))
4381 rtx value = ops[n_ops - 1].op;
4382 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4383 value = neg_const_int (mode, value);
4384 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4385 INTVAL (value));
4386 n_ops--;
4389 /* Put a non-negated operand first, if possible. */
4391 for (i = 0; i < n_ops && ops[i].neg; i++)
4392 continue;
4393 if (i == n_ops)
4394 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4395 else if (i != 0)
4397 tem = ops[0].op;
4398 ops[0] = ops[i];
4399 ops[i].op = tem;
4400 ops[i].neg = 1;
4403 /* Now make the result by performing the requested operations. */
4404 result = ops[0].op;
4405 for (i = 1; i < n_ops; i++)
4406 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4407 mode, result, ops[i].op);
4409 return result;
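/* Illustrative sketch, not part of the original source: what the
   flattening above is expected to do for (minus (plus a b) a), assuming
   A and B are distinct SImode pseudo registers.  The helper name is made
   up; simplify_gen_binary is the usual entry point and routes PLUS/MINUS
   trees through simplify_plus_minus.  */
static rtx
example_cancel_common_term (rtx a, rtx b)
{
  rtx sum = simplify_gen_binary (PLUS, SImode, a, b);
  /* The operands flatten to the multiset { +a, +b, -a }; the pairwise
     combination loop cancels A against -A, leaving just B.  */
  return simplify_gen_binary (MINUS, SImode, sum, a);
}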
4412 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4413 static bool
4414 plus_minus_operand_p (const_rtx x)
4416 return GET_CODE (x) == PLUS
4417 || GET_CODE (x) == MINUS
4418 || (GET_CODE (x) == CONST
4419 && GET_CODE (XEXP (x, 0)) == PLUS
4420 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4421 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4424 /* Like simplify_binary_operation except used for relational operators.
4425 MODE is the mode of the result. If MODE is VOIDmode, the operands
4426 must not both be VOIDmode as well.
4428 CMP_MODE specifies the mode in which the comparison is done, so it is
4429 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4430 the operands or, if both are VOIDmode, the operands are compared in
4431 "infinite precision". */
4433 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4434 enum machine_mode cmp_mode, rtx op0, rtx op1)
4436 rtx tem, trueop0, trueop1;
4438 if (cmp_mode == VOIDmode)
4439 cmp_mode = GET_MODE (op0);
4440 if (cmp_mode == VOIDmode)
4441 cmp_mode = GET_MODE (op1);
4443 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4444 if (tem)
4446 if (SCALAR_FLOAT_MODE_P (mode))
4448 if (tem == const0_rtx)
4449 return CONST0_RTX (mode);
4450 #ifdef FLOAT_STORE_FLAG_VALUE
4452 REAL_VALUE_TYPE val;
4453 val = FLOAT_STORE_FLAG_VALUE (mode);
4454 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4456 #else
4457 return NULL_RTX;
4458 #endif
4460 if (VECTOR_MODE_P (mode))
4462 if (tem == const0_rtx)
4463 return CONST0_RTX (mode);
4464 #ifdef VECTOR_STORE_FLAG_VALUE
4466 int i, units;
4467 rtvec v;
4469 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4470 if (val == NULL_RTX)
4471 return NULL_RTX;
4472 if (val == const1_rtx)
4473 return CONST1_RTX (mode);
4475 units = GET_MODE_NUNITS (mode);
4476 v = rtvec_alloc (units);
4477 for (i = 0; i < units; i++)
4478 RTVEC_ELT (v, i) = val;
4479 return gen_rtx_raw_CONST_VECTOR (mode, v);
4481 #else
4482 return NULL_RTX;
4483 #endif
4486 return tem;
4489 /* For the following tests, ensure const0_rtx is op1. */
4490 if (swap_commutative_operands_p (op0, op1)
4491 || (op0 == const0_rtx && op1 != const0_rtx))
4492 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4494 /* If op0 is a compare, extract the comparison arguments from it. */
4495 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4496 return simplify_gen_relational (code, mode, VOIDmode,
4497 XEXP (op0, 0), XEXP (op0, 1));
4499 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4500 || CC0_P (op0))
4501 return NULL_RTX;
4503 trueop0 = avoid_constant_pool_reference (op0);
4504 trueop1 = avoid_constant_pool_reference (op1);
4505 return simplify_relational_operation_1 (code, mode, cmp_mode,
4506 trueop0, trueop1);
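/* Illustrative sketch, not part of the original source: a self-comparison
   of a plain (side-effect free) SImode register is decided here without
   knowing its value.  The helper name is hypothetical.  */
static rtx
example_fold_self_compare (rtx reg)
{
  /* simplify_const_relational_operation treats equal, side-effect free
     operands as CMP_EQ, so NE folds to const0_rtx.  */
  return simplify_relational_operation (NE, SImode, GET_MODE (reg), reg, reg);
}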
4509 /* This part of simplify_relational_operation is only used when CMP_MODE
4510 is not in class MODE_CC (i.e. it is a real comparison).
4512 MODE is the mode of the result, while CMP_MODE specifies the mode
4513 in which the comparison is done, so it is the mode of the operands. */
4515 static rtx
4516 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4517 enum machine_mode cmp_mode, rtx op0, rtx op1)
4519 enum rtx_code op0code = GET_CODE (op0);
4521 if (op1 == const0_rtx && COMPARISON_P (op0))
4523 /* If op0 is a comparison, extract the comparison arguments
4524 from it. */
4525 if (code == NE)
4527 if (GET_MODE (op0) == mode)
4528 return simplify_rtx (op0);
4529 else
4530 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4531 XEXP (op0, 0), XEXP (op0, 1));
4533 else if (code == EQ)
4535 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4536 if (new_code != UNKNOWN)
4537 return simplify_gen_relational (new_code, mode, VOIDmode,
4538 XEXP (op0, 0), XEXP (op0, 1));
4542 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4543 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4544 if ((code == LTU || code == GEU)
4545 && GET_CODE (op0) == PLUS
4546 && CONST_INT_P (XEXP (op0, 1))
4547 && (rtx_equal_p (op1, XEXP (op0, 0))
4548 || rtx_equal_p (op1, XEXP (op0, 1)))
4549 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4550 && XEXP (op0, 1) != const0_rtx)
4552 rtx new_cmp
4553 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4554 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4555 cmp_mode, XEXP (op0, 0), new_cmp);
4558 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4559 if ((code == LTU || code == GEU)
4560 && GET_CODE (op0) == PLUS
4561 && rtx_equal_p (op1, XEXP (op0, 1))
4562 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4563 && !rtx_equal_p (op1, XEXP (op0, 0)))
4564 return simplify_gen_relational (code, mode, cmp_mode, op0,
4565 copy_rtx (XEXP (op0, 0)));
4567 if (op1 == const0_rtx)
4569 /* Canonicalize (GTU x 0) as (NE x 0). */
4570 if (code == GTU)
4571 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4572 /* Canonicalize (LEU x 0) as (EQ x 0). */
4573 if (code == LEU)
4574 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4576 else if (op1 == const1_rtx)
4578 switch (code)
4580 case GE:
4581 /* Canonicalize (GE x 1) as (GT x 0). */
4582 return simplify_gen_relational (GT, mode, cmp_mode,
4583 op0, const0_rtx);
4584 case GEU:
4585 /* Canonicalize (GEU x 1) as (NE x 0). */
4586 return simplify_gen_relational (NE, mode, cmp_mode,
4587 op0, const0_rtx);
4588 case LT:
4589 /* Canonicalize (LT x 1) as (LE x 0). */
4590 return simplify_gen_relational (LE, mode, cmp_mode,
4591 op0, const0_rtx);
4592 case LTU:
4593 /* Canonicalize (LTU x 1) as (EQ x 0). */
4594 return simplify_gen_relational (EQ, mode, cmp_mode,
4595 op0, const0_rtx);
4596 default:
4597 break;
4600 else if (op1 == constm1_rtx)
4602 /* Canonicalize (LE x -1) as (LT x 0). */
4603 if (code == LE)
4604 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4605 /* Canonicalize (GT x -1) as (GE x 0). */
4606 if (code == GT)
4607 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4610 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4611 if ((code == EQ || code == NE)
4612 && (op0code == PLUS || op0code == MINUS)
4613 && CONSTANT_P (op1)
4614 && CONSTANT_P (XEXP (op0, 1))
4615 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4617 rtx x = XEXP (op0, 0);
4618 rtx c = XEXP (op0, 1);
4619 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4620 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4622 /* Detect an infinite recursion condition, where this simplification
4623 would oscillate between:
4624 A + B == C <---> C - B == A,
4625 where A, B, and C are all non-simplifiable constant expressions,
4626 usually SYMBOL_REFs. */
4627 if (GET_CODE (tem) == invcode
4628 && CONSTANT_P (x)
4629 && rtx_equal_p (c, XEXP (tem, 1)))
4630 return NULL_RTX;
4632 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4635 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4636 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4637 if (code == NE
4638 && op1 == const0_rtx
4639 && GET_MODE_CLASS (mode) == MODE_INT
4640 && cmp_mode != VOIDmode
4641 /* ??? Work-around BImode bugs in the ia64 backend. */
4642 && mode != BImode
4643 && cmp_mode != BImode
4644 && nonzero_bits (op0, cmp_mode) == 1
4645 && STORE_FLAG_VALUE == 1)
4646 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4647 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4648 : lowpart_subreg (mode, op0, cmp_mode);
4650 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4651 if ((code == EQ || code == NE)
4652 && op1 == const0_rtx
4653 && op0code == XOR)
4654 return simplify_gen_relational (code, mode, cmp_mode,
4655 XEXP (op0, 0), XEXP (op0, 1));
4657 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4658 if ((code == EQ || code == NE)
4659 && op0code == XOR
4660 && rtx_equal_p (XEXP (op0, 0), op1)
4661 && !side_effects_p (XEXP (op0, 0)))
4662 return simplify_gen_relational (code, mode, cmp_mode,
4663 XEXP (op0, 1), const0_rtx);
4665 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4666 if ((code == EQ || code == NE)
4667 && op0code == XOR
4668 && rtx_equal_p (XEXP (op0, 1), op1)
4669 && !side_effects_p (XEXP (op0, 1)))
4670 return simplify_gen_relational (code, mode, cmp_mode,
4671 XEXP (op0, 0), const0_rtx);
4673 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4674 if ((code == EQ || code == NE)
4675 && op0code == XOR
4676 && CONST_SCALAR_INT_P (op1)
4677 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4678 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4679 simplify_gen_binary (XOR, cmp_mode,
4680 XEXP (op0, 1), op1));
4682 if (op0code == POPCOUNT && op1 == const0_rtx)
4683 switch (code)
4685 case EQ:
4686 case LE:
4687 case LEU:
4688 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4689 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4690 XEXP (op0, 0), const0_rtx);
4692 case NE:
4693 case GT:
4694 case GTU:
4695 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4696 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4697 XEXP (op0, 0), const0_rtx);
4699 default:
4700 break;
4703 return NULL_RTX;
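/* Illustrative sketch, not part of the original source: the const1_rtx
   canonicalizations above rewrite an unsigned "x >= 1" into the canonical
   "x != 0" form.  X is assumed to be an SImode pseudo; the helper name is
   made up.  */
static rtx
example_canonicalize_geu_one (rtx x)
{
  /* Expected to yield (ne:SI x (const_int 0)) rather than
     (geu:SI x (const_int 1)).  */
  return simplify_gen_relational (GEU, SImode, SImode, x, const1_rtx);
}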
4706 enum
4708 CMP_EQ = 1,
4709 CMP_LT = 2,
4710 CMP_GT = 4,
4711 CMP_LTU = 8,
4712 CMP_GTU = 16
4716 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4717 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4718 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4719 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4720 For floating-point comparisons, assume that the operands were ordered. */
4722 static rtx
4723 comparison_result (enum rtx_code code, int known_results)
4725 switch (code)
4727 case EQ:
4728 case UNEQ:
4729 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4730 case NE:
4731 case LTGT:
4732 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4734 case LT:
4735 case UNLT:
4736 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4737 case GE:
4738 case UNGE:
4739 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4741 case GT:
4742 case UNGT:
4743 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4744 case LE:
4745 case UNLE:
4746 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4748 case LTU:
4749 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4750 case GEU:
4751 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4753 case GTU:
4754 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4755 case LEU:
4756 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4758 case ORDERED:
4759 return const_true_rtx;
4760 case UNORDERED:
4761 return const0_rtx;
4762 default:
4763 gcc_unreachable ();
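/* Illustrative sketch, not part of the original source: how the CMP_*
   encoding above answers individual comparison codes.  The helper name is
   hypothetical.  */
static void
example_comparison_result_usage (void)
{
  /* CMP_EQ decides the equality-style codes...  */
  gcc_assert (comparison_result (EQ, CMP_EQ) == const_true_rtx);
  gcc_assert (comparison_result (NE, CMP_EQ) == const0_rtx);
  /* ...while the signed and unsigned orderings are answered by separate
     bits; ORDERED is always true because the operands are assumed to be
     ordered.  */
  gcc_assert (comparison_result (LT, CMP_LT | CMP_GTU) == const_true_rtx);
  gcc_assert (comparison_result (LTU, CMP_LT | CMP_GTU) == const0_rtx);
  gcc_assert (comparison_result (ORDERED, CMP_LT | CMP_GTU) == const_true_rtx);
}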
4767 /* Check if the given comparison (done in the given MODE) is actually a
4768 tautology or a contradiction.
4769 If no simplification is possible, this function returns zero.
4770 Otherwise, it returns either const_true_rtx or const0_rtx. */
4773 simplify_const_relational_operation (enum rtx_code code,
4774 enum machine_mode mode,
4775 rtx op0, rtx op1)
4777 rtx tem;
4778 rtx trueop0;
4779 rtx trueop1;
4781 gcc_assert (mode != VOIDmode
4782 || (GET_MODE (op0) == VOIDmode
4783 && GET_MODE (op1) == VOIDmode));
4785 /* If op0 is a compare, extract the comparison arguments from it. */
4786 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4788 op1 = XEXP (op0, 1);
4789 op0 = XEXP (op0, 0);
4791 if (GET_MODE (op0) != VOIDmode)
4792 mode = GET_MODE (op0);
4793 else if (GET_MODE (op1) != VOIDmode)
4794 mode = GET_MODE (op1);
4795 else
4796 return 0;
4799 /* We can't simplify MODE_CC values since we don't know what the
4800 actual comparison is. */
4801 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4802 return 0;
4804 /* Make sure the constant is second. */
4805 if (swap_commutative_operands_p (op0, op1))
4807 tem = op0, op0 = op1, op1 = tem;
4808 code = swap_condition (code);
4811 trueop0 = avoid_constant_pool_reference (op0);
4812 trueop1 = avoid_constant_pool_reference (op1);
4814 /* For integer comparisons of A and B maybe we can simplify A - B and can
4815 then simplify a comparison of that with zero. If A and B are both either
4816 a register or a CONST_INT, this can't help; testing for these cases will
4817 prevent infinite recursion here and speed things up.
4819 We can only do this for EQ and NE comparisons as otherwise we may
4820 lose or introduce overflow which we cannot disregard as undefined, since
4821 we do not know the signedness of the operation on either the left or
4822 the right hand side of the comparison. */
4824 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4825 && (code == EQ || code == NE)
4826 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4827 && (REG_P (op1) || CONST_INT_P (trueop1)))
4828 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4829 /* We cannot do this if tem is a nonzero address. */
4830 && ! nonzero_address_p (tem))
4831 return simplify_const_relational_operation (signed_condition (code),
4832 mode, tem, const0_rtx);
4834 if (! HONOR_NANS (mode) && code == ORDERED)
4835 return const_true_rtx;
4837 if (! HONOR_NANS (mode) && code == UNORDERED)
4838 return const0_rtx;
4840 /* For modes without NaNs, if the two operands are equal, we know the
4841 result except if they have side-effects. Even with NaNs we know
4842 the result of unordered comparisons and, if signaling NaNs are
4843 irrelevant, also the result of LT/GT/LTGT. */
4844 if ((! HONOR_NANS (GET_MODE (trueop0))
4845 || code == UNEQ || code == UNLE || code == UNGE
4846 || ((code == LT || code == GT || code == LTGT)
4847 && ! HONOR_SNANS (GET_MODE (trueop0))))
4848 && rtx_equal_p (trueop0, trueop1)
4849 && ! side_effects_p (trueop0))
4850 return comparison_result (code, CMP_EQ);
4852 /* If the operands are floating-point constants, see if we can fold
4853 the result. */
4854 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4855 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4856 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4858 REAL_VALUE_TYPE d0, d1;
4860 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4861 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4863 /* Comparisons are unordered iff at least one of the values is NaN. */
4864 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4865 switch (code)
4867 case UNEQ:
4868 case UNLT:
4869 case UNGT:
4870 case UNLE:
4871 case UNGE:
4872 case NE:
4873 case UNORDERED:
4874 return const_true_rtx;
4875 case EQ:
4876 case LT:
4877 case GT:
4878 case LE:
4879 case GE:
4880 case LTGT:
4881 case ORDERED:
4882 return const0_rtx;
4883 default:
4884 return 0;
4887 return comparison_result (code,
4888 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4889 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4892 /* Otherwise, see if the operands are both integers. */
4893 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4894 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4895 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4897 int width = GET_MODE_PRECISION (mode);
4898 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4899 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4901 /* Get the two words comprising each integer constant. */
4902 if (CONST_DOUBLE_AS_INT_P (trueop0))
4904 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4905 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4907 else
4909 l0u = l0s = INTVAL (trueop0);
4910 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4913 if (CONST_DOUBLE_AS_INT_P (trueop1))
4915 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4916 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4918 else
4920 l1u = l1s = INTVAL (trueop1);
4921 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4924 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4925 we have to sign or zero-extend the values. */
4926 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4928 l0u &= GET_MODE_MASK (mode);
4929 l1u &= GET_MODE_MASK (mode);
4931 if (val_signbit_known_set_p (mode, l0s))
4932 l0s |= ~GET_MODE_MASK (mode);
4934 if (val_signbit_known_set_p (mode, l1s))
4935 l1s |= ~GET_MODE_MASK (mode);
4937 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4938 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4940 if (h0u == h1u && l0u == l1u)
4941 return comparison_result (code, CMP_EQ);
4942 else
4944 int cr;
4945 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4946 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4947 return comparison_result (code, cr);
4951 /* Optimize comparisons with upper and lower bounds. */
4952 if (HWI_COMPUTABLE_MODE_P (mode)
4953 && CONST_INT_P (trueop1))
4955 int sign;
4956 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4957 HOST_WIDE_INT val = INTVAL (trueop1);
4958 HOST_WIDE_INT mmin, mmax;
4960 if (code == GEU
4961 || code == LEU
4962 || code == GTU
4963 || code == LTU)
4964 sign = 0;
4965 else
4966 sign = 1;
4968 /* Get a reduced range if the sign bit is zero. */
4969 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4971 mmin = 0;
4972 mmax = nonzero;
4974 else
4976 rtx mmin_rtx, mmax_rtx;
4977 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4979 mmin = INTVAL (mmin_rtx);
4980 mmax = INTVAL (mmax_rtx);
4981 if (sign)
4983 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4985 mmin >>= (sign_copies - 1);
4986 mmax >>= (sign_copies - 1);
4990 switch (code)
4992 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4993 case GEU:
4994 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4995 return const_true_rtx;
4996 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4997 return const0_rtx;
4998 break;
4999 case GE:
5000 if (val <= mmin)
5001 return const_true_rtx;
5002 if (val > mmax)
5003 return const0_rtx;
5004 break;
5006 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5007 case LEU:
5008 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5009 return const_true_rtx;
5010 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5011 return const0_rtx;
5012 break;
5013 case LE:
5014 if (val >= mmax)
5015 return const_true_rtx;
5016 if (val < mmin)
5017 return const0_rtx;
5018 break;
5020 case EQ:
5021 /* x == y is always false for y out of range. */
5022 if (val < mmin || val > mmax)
5023 return const0_rtx;
5024 break;
5026 /* x > y is always false for y >= mmax, always true for y < mmin. */
5027 case GTU:
5028 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5029 return const0_rtx;
5030 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5031 return const_true_rtx;
5032 break;
5033 case GT:
5034 if (val >= mmax)
5035 return const0_rtx;
5036 if (val < mmin)
5037 return const_true_rtx;
5038 break;
5040 /* x < y is always false for y <= mmin, always true for y > mmax. */
5041 case LTU:
5042 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5043 return const0_rtx;
5044 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5045 return const_true_rtx;
5046 break;
5047 case LT:
5048 if (val <= mmin)
5049 return const0_rtx;
5050 if (val > mmax)
5051 return const_true_rtx;
5052 break;
5054 case NE:
5055 /* x != y is always true for y out of range. */
5056 if (val < mmin || val > mmax)
5057 return const_true_rtx;
5058 break;
5060 default:
5061 break;
5065 /* Optimize integer comparisons with zero. */
5066 if (trueop1 == const0_rtx)
5068 /* Some addresses are known to be nonzero. We don't know
5069 their sign, but equality comparisons are known. */
5070 if (nonzero_address_p (trueop0))
5072 if (code == EQ || code == LEU)
5073 return const0_rtx;
5074 if (code == NE || code == GTU)
5075 return const_true_rtx;
5078 /* See if the first operand is an IOR with a constant. If so, we
5079 may be able to determine the result of this comparison. */
5080 if (GET_CODE (op0) == IOR)
5082 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5083 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5085 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5086 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5087 && (UINTVAL (inner_const)
5088 & ((unsigned HOST_WIDE_INT) 1
5089 << sign_bitnum)));
5091 switch (code)
5093 case EQ:
5094 case LEU:
5095 return const0_rtx;
5096 case NE:
5097 case GTU:
5098 return const_true_rtx;
5099 case LT:
5100 case LE:
5101 if (has_sign)
5102 return const_true_rtx;
5103 break;
5104 case GT:
5105 case GE:
5106 if (has_sign)
5107 return const0_rtx;
5108 break;
5109 default:
5110 break;
5116 /* Optimize comparison of ABS with zero. */
5117 if (trueop1 == CONST0_RTX (mode)
5118 && (GET_CODE (trueop0) == ABS
5119 || (GET_CODE (trueop0) == FLOAT_EXTEND
5120 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5122 switch (code)
5124 case LT:
5125 /* Optimize abs(x) < 0.0. */
5126 if (!HONOR_SNANS (mode)
5127 && (!INTEGRAL_MODE_P (mode)
5128 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5130 if (INTEGRAL_MODE_P (mode)
5131 && (issue_strict_overflow_warning
5132 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5133 warning (OPT_Wstrict_overflow,
5134 ("assuming signed overflow does not occur when "
5135 "assuming abs (x) < 0 is false"));
5136 return const0_rtx;
5138 break;
5140 case GE:
5141 /* Optimize abs(x) >= 0.0. */
5142 if (!HONOR_NANS (mode)
5143 && (!INTEGRAL_MODE_P (mode)
5144 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5146 if (INTEGRAL_MODE_P (mode)
5147 && (issue_strict_overflow_warning
5148 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5149 warning (OPT_Wstrict_overflow,
5150 ("assuming signed overflow does not occur when "
5151 "assuming abs (x) >= 0 is true"));
5152 return const_true_rtx;
5154 break;
5156 case UNGE:
5157 /* Optimize ! (abs(x) < 0.0). */
5158 return const_true_rtx;
5160 default:
5161 break;
5165 return 0;
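/* Illustrative sketch, not part of the original source: the signed versus
   unsigned distinction in the constant folding above.  Comparing
   (const_int -1) with (const_int 1) in SImode, signed LT is true while
   unsigned LTU is false (0xffffffff is not below 1).  Hypothetical
   helper name.  */
static void
example_signed_vs_unsigned_fold (void)
{
  rtx minus_one = gen_int_mode (-1, SImode);
  gcc_assert (simplify_const_relational_operation (LT, SImode,
                                                   minus_one, const1_rtx)
              == const_true_rtx);
  gcc_assert (simplify_const_relational_operation (LTU, SImode,
                                                   minus_one, const1_rtx)
              == const0_rtx);
}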
5168 /* Simplify CODE, an operation with result mode MODE and three operands,
5169 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5170 a constant. Return 0 if no simplification is possible. */
5173 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5174 enum machine_mode op0_mode, rtx op0, rtx op1,
5175 rtx op2)
5177 unsigned int width = GET_MODE_PRECISION (mode);
5178 bool any_change = false;
5179 rtx tem;
5181 /* VOIDmode means "infinite" precision. */
5182 if (width == 0)
5183 width = HOST_BITS_PER_WIDE_INT;
5185 switch (code)
5187 case FMA:
5188 /* Simplify negations around the multiplication. */
5189 /* -a * -b + c => a * b + c. */
5190 if (GET_CODE (op0) == NEG)
5192 tem = simplify_unary_operation (NEG, mode, op1, mode);
5193 if (tem)
5194 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5196 else if (GET_CODE (op1) == NEG)
5198 tem = simplify_unary_operation (NEG, mode, op0, mode);
5199 if (tem)
5200 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5203 /* Canonicalize the two multiplication operands. */
5204 /* a * -b + c => -b * a + c. */
5205 if (swap_commutative_operands_p (op0, op1))
5206 tem = op0, op0 = op1, op1 = tem, any_change = true;
5208 if (any_change)
5209 return gen_rtx_FMA (mode, op0, op1, op2);
5210 return NULL_RTX;
5212 case SIGN_EXTRACT:
5213 case ZERO_EXTRACT:
5214 if (CONST_INT_P (op0)
5215 && CONST_INT_P (op1)
5216 && CONST_INT_P (op2)
5217 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5218 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5220 /* Extracting a bit-field from a constant. */
5221 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5222 HOST_WIDE_INT op1val = INTVAL (op1);
5223 HOST_WIDE_INT op2val = INTVAL (op2);
5224 if (BITS_BIG_ENDIAN)
5225 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5226 else
5227 val >>= op2val;
5229 if (HOST_BITS_PER_WIDE_INT != op1val)
5231 /* First zero-extend. */
5232 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5233 /* If desired, propagate sign bit. */
5234 if (code == SIGN_EXTRACT
5235 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5236 != 0)
5237 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5240 return gen_int_mode (val, mode);
5242 break;
5244 case IF_THEN_ELSE:
5245 if (CONST_INT_P (op0))
5246 return op0 != const0_rtx ? op1 : op2;
5248 /* Convert c ? a : a into "a". */
5249 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5250 return op1;
5252 /* Convert a != b ? a : b into "a". */
5253 if (GET_CODE (op0) == NE
5254 && ! side_effects_p (op0)
5255 && ! HONOR_NANS (mode)
5256 && ! HONOR_SIGNED_ZEROS (mode)
5257 && ((rtx_equal_p (XEXP (op0, 0), op1)
5258 && rtx_equal_p (XEXP (op0, 1), op2))
5259 || (rtx_equal_p (XEXP (op0, 0), op2)
5260 && rtx_equal_p (XEXP (op0, 1), op1))))
5261 return op1;
5263 /* Convert a == b ? a : b into "b". */
5264 if (GET_CODE (op0) == EQ
5265 && ! side_effects_p (op0)
5266 && ! HONOR_NANS (mode)
5267 && ! HONOR_SIGNED_ZEROS (mode)
5268 && ((rtx_equal_p (XEXP (op0, 0), op1)
5269 && rtx_equal_p (XEXP (op0, 1), op2))
5270 || (rtx_equal_p (XEXP (op0, 0), op2)
5271 && rtx_equal_p (XEXP (op0, 1), op1))))
5272 return op2;
5274 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5276 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5277 ? GET_MODE (XEXP (op0, 1))
5278 : GET_MODE (XEXP (op0, 0)));
5279 rtx temp;
5281 /* Look for happy constants in op1 and op2. */
5282 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5284 HOST_WIDE_INT t = INTVAL (op1);
5285 HOST_WIDE_INT f = INTVAL (op2);
5287 if (t == STORE_FLAG_VALUE && f == 0)
5288 code = GET_CODE (op0);
5289 else if (t == 0 && f == STORE_FLAG_VALUE)
5291 enum rtx_code tmp;
5292 tmp = reversed_comparison_code (op0, NULL_RTX);
5293 if (tmp == UNKNOWN)
5294 break;
5295 code = tmp;
5297 else
5298 break;
5300 return simplify_gen_relational (code, mode, cmp_mode,
5301 XEXP (op0, 0), XEXP (op0, 1));
5304 if (cmp_mode == VOIDmode)
5305 cmp_mode = op0_mode;
5306 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5307 cmp_mode, XEXP (op0, 0),
5308 XEXP (op0, 1));
5310 /* See if any simplifications were possible. */
5311 if (temp)
5313 if (CONST_INT_P (temp))
5314 return temp == const0_rtx ? op2 : op1;
5315 else if (temp)
5316 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5319 break;
5321 case VEC_MERGE:
5322 gcc_assert (GET_MODE (op0) == mode);
5323 gcc_assert (GET_MODE (op1) == mode);
5324 gcc_assert (VECTOR_MODE_P (mode));
5325 op2 = avoid_constant_pool_reference (op2);
5326 if (CONST_INT_P (op2))
5328 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5329 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5330 int mask = (1 << n_elts) - 1;
5332 if (!(INTVAL (op2) & mask))
5333 return op1;
5334 if ((INTVAL (op2) & mask) == mask)
5335 return op0;
5337 op0 = avoid_constant_pool_reference (op0);
5338 op1 = avoid_constant_pool_reference (op1);
5339 if (GET_CODE (op0) == CONST_VECTOR
5340 && GET_CODE (op1) == CONST_VECTOR)
5342 rtvec v = rtvec_alloc (n_elts);
5343 unsigned int i;
5345 for (i = 0; i < n_elts; i++)
5346 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5347 ? CONST_VECTOR_ELT (op0, i)
5348 : CONST_VECTOR_ELT (op1, i));
5349 return gen_rtx_CONST_VECTOR (mode, v);
5352 break;
5354 default:
5355 gcc_unreachable ();
5358 return 0;
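/* Illustrative sketch, not part of the original source: a constant
   IF_THEN_ELSE condition selects an arm outright via the CONST_INT_P
   case above.  The helper name is made up; the OP0_MODE argument is only
   a placeholder here since the condition is already a constant.  */
static rtx
example_fold_if_then_else (rtx then_arm, rtx else_arm)
{
  /* A nonzero condition returns THEN_ARM; const0_rtx would return
     ELSE_ARM.  */
  return simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                     const1_rtx, then_arm, else_arm);
}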
5361 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5362 or CONST_VECTOR,
5363 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5365 Works by unpacking OP into a collection of 8-bit values
5366 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5367 and then repacking them again for OUTERMODE. */
5369 static rtx
5370 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5371 enum machine_mode innermode, unsigned int byte)
5373 /* We support up to 512-bit values (for V8DFmode). */
5374 enum {
5375 max_bitsize = 512,
5376 value_bit = 8,
5377 value_mask = (1 << value_bit) - 1
5379 unsigned char value[max_bitsize / value_bit];
5380 int value_start;
5381 int i;
5382 int elem;
5384 int num_elem;
5385 rtx * elems;
5386 int elem_bitsize;
5387 rtx result_s;
5388 rtvec result_v = NULL;
5389 enum mode_class outer_class;
5390 enum machine_mode outer_submode;
5392 /* Some ports misuse CCmode. */
5393 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5394 return op;
5396 /* We have no way to represent a complex constant at the rtl level. */
5397 if (COMPLEX_MODE_P (outermode))
5398 return NULL_RTX;
5400 /* Unpack the value. */
5402 if (GET_CODE (op) == CONST_VECTOR)
5404 num_elem = CONST_VECTOR_NUNITS (op);
5405 elems = &CONST_VECTOR_ELT (op, 0);
5406 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5408 else
5410 num_elem = 1;
5411 elems = &op;
5412 elem_bitsize = max_bitsize;
5414 /* If this asserts, it is too complicated; reducing value_bit may help. */
5415 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5416 /* I don't know how to handle endianness of sub-units. */
5417 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5419 for (elem = 0; elem < num_elem; elem++)
5421 unsigned char * vp;
5422 rtx el = elems[elem];
5424 /* Vectors are kept in target memory order. (This is probably
5425 a mistake.) */
5427 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5428 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5429 / BITS_PER_UNIT);
5430 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5431 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5432 unsigned bytele = (subword_byte % UNITS_PER_WORD
5433 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5434 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5437 switch (GET_CODE (el))
5439 case CONST_INT:
5440 for (i = 0;
5441 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5442 i += value_bit)
5443 *vp++ = INTVAL (el) >> i;
5444 /* CONST_INTs are always logically sign-extended. */
5445 for (; i < elem_bitsize; i += value_bit)
5446 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5447 break;
5449 case CONST_DOUBLE:
5450 if (GET_MODE (el) == VOIDmode)
5452 unsigned char extend = 0;
5453 /* If this triggers, someone should have generated a
5454 CONST_INT instead. */
5455 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5457 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5458 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5459 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5461 *vp++
5462 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5463 i += value_bit;
5466 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5467 extend = -1;
5468 for (; i < elem_bitsize; i += value_bit)
5469 *vp++ = extend;
5471 else
5473 long tmp[max_bitsize / 32];
5474 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5476 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5477 gcc_assert (bitsize <= elem_bitsize);
5478 gcc_assert (bitsize % value_bit == 0);
5480 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5481 GET_MODE (el));
5483 /* real_to_target produces its result in words affected by
5484 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5485 and use WORDS_BIG_ENDIAN instead; see the documentation
5486 of SUBREG in rtl.texi. */
5487 for (i = 0; i < bitsize; i += value_bit)
5489 int ibase;
5490 if (WORDS_BIG_ENDIAN)
5491 ibase = bitsize - 1 - i;
5492 else
5493 ibase = i;
5494 *vp++ = tmp[ibase / 32] >> i % 32;
5497 /* It shouldn't matter what's done here, so fill it with
5498 zero. */
5499 for (; i < elem_bitsize; i += value_bit)
5500 *vp++ = 0;
5502 break;
5504 case CONST_FIXED:
5505 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5507 for (i = 0; i < elem_bitsize; i += value_bit)
5508 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5510 else
5512 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5513 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5514 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5515 i += value_bit)
5516 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5517 >> (i - HOST_BITS_PER_WIDE_INT);
5518 for (; i < elem_bitsize; i += value_bit)
5519 *vp++ = 0;
5521 break;
5523 default:
5524 gcc_unreachable ();
5528 /* Now, pick the right byte to start with. */
5529 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5530 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5531 will already have offset 0. */
5532 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5534 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5535 - byte);
5536 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5537 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5538 byte = (subword_byte % UNITS_PER_WORD
5539 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5542 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5543 so if it's become negative it will instead be very large.) */
5544 gcc_assert (byte < GET_MODE_SIZE (innermode));
5546 /* Convert from bytes to chunks of size value_bit. */
5547 value_start = byte * (BITS_PER_UNIT / value_bit);
5549 /* Re-pack the value. */
5551 if (VECTOR_MODE_P (outermode))
5553 num_elem = GET_MODE_NUNITS (outermode);
5554 result_v = rtvec_alloc (num_elem);
5555 elems = &RTVEC_ELT (result_v, 0);
5556 outer_submode = GET_MODE_INNER (outermode);
5558 else
5560 num_elem = 1;
5561 elems = &result_s;
5562 outer_submode = outermode;
5565 outer_class = GET_MODE_CLASS (outer_submode);
5566 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5568 gcc_assert (elem_bitsize % value_bit == 0);
5569 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5571 for (elem = 0; elem < num_elem; elem++)
5573 unsigned char *vp;
5575 /* Vectors are stored in target memory order. (This is probably
5576 a mistake.) */
5578 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5579 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5580 / BITS_PER_UNIT);
5581 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5582 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5583 unsigned bytele = (subword_byte % UNITS_PER_WORD
5584 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5585 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5588 switch (outer_class)
5590 case MODE_INT:
5591 case MODE_PARTIAL_INT:
5593 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5595 for (i = 0;
5596 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5597 i += value_bit)
5598 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5599 for (; i < elem_bitsize; i += value_bit)
5600 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5601 << (i - HOST_BITS_PER_WIDE_INT);
5603 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5604 know why. */
5605 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5606 elems[elem] = gen_int_mode (lo, outer_submode);
5607 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5608 elems[elem] = immed_double_const (lo, hi, outer_submode);
5609 else
5610 return NULL_RTX;
5612 break;
5614 case MODE_FLOAT:
5615 case MODE_DECIMAL_FLOAT:
5617 REAL_VALUE_TYPE r;
5618 long tmp[max_bitsize / 32];
5620 /* real_from_target wants its input in words affected by
5621 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5622 and use WORDS_BIG_ENDIAN instead; see the documentation
5623 of SUBREG in rtl.texi. */
5624 for (i = 0; i < max_bitsize / 32; i++)
5625 tmp[i] = 0;
5626 for (i = 0; i < elem_bitsize; i += value_bit)
5628 int ibase;
5629 if (WORDS_BIG_ENDIAN)
5630 ibase = elem_bitsize - 1 - i;
5631 else
5632 ibase = i;
5633 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5636 real_from_target (&r, tmp, outer_submode);
5637 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5639 break;
5641 case MODE_FRACT:
5642 case MODE_UFRACT:
5643 case MODE_ACCUM:
5644 case MODE_UACCUM:
5646 FIXED_VALUE_TYPE f;
5647 f.data.low = 0;
5648 f.data.high = 0;
5649 f.mode = outer_submode;
5651 for (i = 0;
5652 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5653 i += value_bit)
5654 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5655 for (; i < elem_bitsize; i += value_bit)
5656 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5657 << (i - HOST_BITS_PER_WIDE_INT));
5659 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5661 break;
5663 default:
5664 gcc_unreachable ();
5667 if (VECTOR_MODE_P (outermode))
5668 return gen_rtx_CONST_VECTOR (outermode, result_v);
5669 else
5670 return result_s;
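/* Illustrative sketch, not part of the original source: the unpack/repack
   scheme above evaluated by hand.  (const_int 0x1234) unpacks into the
   little-endian value[] bytes { 0x34, 0x12, 0, 0, ... }; on a
   little-endian target, byte offset 0 of the SImode constant selects the
   low-order byte, so the QImode result is (const_int 0x34).  Callers
   normally reach this through simplify_subreg below; the helper name is
   hypothetical.  */
static rtx
example_low_byte_of_const (void)
{
  return simplify_immed_subreg (QImode, GEN_INT (0x1234), SImode, 0);
}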
5673 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5674 Return 0 if no simplifications are possible. */
5676 simplify_subreg (enum machine_mode outermode, rtx op,
5677 enum machine_mode innermode, unsigned int byte)
5679 /* Little bit of sanity checking. */
5680 gcc_assert (innermode != VOIDmode);
5681 gcc_assert (outermode != VOIDmode);
5682 gcc_assert (innermode != BLKmode);
5683 gcc_assert (outermode != BLKmode);
5685 gcc_assert (GET_MODE (op) == innermode
5686 || GET_MODE (op) == VOIDmode);
5688 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5689 return NULL_RTX;
5691 if (byte >= GET_MODE_SIZE (innermode))
5692 return NULL_RTX;
5694 if (outermode == innermode && !byte)
5695 return op;
5697 if (CONST_SCALAR_INT_P (op)
5698 || CONST_DOUBLE_AS_FLOAT_P (op)
5699 || GET_CODE (op) == CONST_FIXED
5700 || GET_CODE (op) == CONST_VECTOR)
5701 return simplify_immed_subreg (outermode, op, innermode, byte);
5703 /* Changing mode twice with SUBREG => just change it once,
5704 or not at all if changing back to the op's starting mode. */
5705 if (GET_CODE (op) == SUBREG)
5707 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5708 int final_offset = byte + SUBREG_BYTE (op);
5709 rtx newx;
5711 if (outermode == innermostmode
5712 && byte == 0 && SUBREG_BYTE (op) == 0)
5713 return SUBREG_REG (op);
5715 /* The SUBREG_BYTE represents the offset, as if the value were stored
5716 in memory. An irritating exception is a paradoxical subreg, where
5717 we define SUBREG_BYTE to be 0. On big-endian machines, this
5718 value should be negative. For a moment, undo this exception. */
5719 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5721 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5722 if (WORDS_BIG_ENDIAN)
5723 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5724 if (BYTES_BIG_ENDIAN)
5725 final_offset += difference % UNITS_PER_WORD;
5727 if (SUBREG_BYTE (op) == 0
5728 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5730 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5731 if (WORDS_BIG_ENDIAN)
5732 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5733 if (BYTES_BIG_ENDIAN)
5734 final_offset += difference % UNITS_PER_WORD;
5737 /* See whether resulting subreg will be paradoxical. */
5738 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5740 /* In nonparadoxical subregs we can't handle negative offsets. */
5741 if (final_offset < 0)
5742 return NULL_RTX;
5743 /* Bail out in case resulting subreg would be incorrect. */
5744 if (final_offset % GET_MODE_SIZE (outermode)
5745 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5746 return NULL_RTX;
5748 else
5750 int offset = 0;
5751 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5753 /* In a paradoxical subreg, see if we are still looking at the lower part.
5754 If so, our SUBREG_BYTE will be 0. */
5755 if (WORDS_BIG_ENDIAN)
5756 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5757 if (BYTES_BIG_ENDIAN)
5758 offset += difference % UNITS_PER_WORD;
5759 if (offset == final_offset)
5760 final_offset = 0;
5761 else
5762 return NULL_RTX;
5765 /* Recurse for further possible simplifications. */
5766 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5767 final_offset);
5768 if (newx)
5769 return newx;
5770 if (validate_subreg (outermode, innermostmode,
5771 SUBREG_REG (op), final_offset))
5773 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5774 if (SUBREG_PROMOTED_VAR_P (op)
5775 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5776 && GET_MODE_CLASS (outermode) == MODE_INT
5777 && IN_RANGE (GET_MODE_SIZE (outermode),
5778 GET_MODE_SIZE (innermode),
5779 GET_MODE_SIZE (innermostmode))
5780 && subreg_lowpart_p (newx))
5782 SUBREG_PROMOTED_VAR_P (newx) = 1;
5783 SUBREG_PROMOTED_UNSIGNED_SET
5784 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5786 return newx;
5788 return NULL_RTX;
5791 /* SUBREG of a hard register => just change the register number
5792 and/or mode. If the hard register is not valid in that mode,
5793 suppress this simplification. If the hard register is the stack,
5794 frame, or argument pointer, leave this as a SUBREG. */
5796 if (REG_P (op) && HARD_REGISTER_P (op))
5798 unsigned int regno, final_regno;
5800 regno = REGNO (op);
5801 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5802 if (HARD_REGISTER_NUM_P (final_regno))
5804 rtx x;
5805 int final_offset = byte;
5807 /* Adjust offset for paradoxical subregs. */
5808 if (byte == 0
5809 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5811 int difference = (GET_MODE_SIZE (innermode)
5812 - GET_MODE_SIZE (outermode));
5813 if (WORDS_BIG_ENDIAN)
5814 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5815 if (BYTES_BIG_ENDIAN)
5816 final_offset += difference % UNITS_PER_WORD;
5819 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5821 /* Propagate the original regno. We don't have any way to specify
5822 the offset inside the original regno, so do so only for the lowpart.
5823 The information is used only by alias analysis, which cannot
5824 grok a partial register anyway. */
5826 if (subreg_lowpart_offset (outermode, innermode) == byte)
5827 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5828 return x;
5832 /* If we have a SUBREG of a register that we are replacing and we are
5833 replacing it with a MEM, make a new MEM and try replacing the
5834 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5835 or if we would be widening it. */
5837 if (MEM_P (op)
5838 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5839 /* Allow splitting of volatile memory references in case we don't
5840 have an instruction to move the whole thing. */
5841 && (! MEM_VOLATILE_P (op)
5842 || ! have_insn_for (SET, innermode))
5843 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5844 return adjust_address_nv (op, outermode, byte);
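/* Illustrative example, not part of the original file (assumes a
   little-endian target): (subreg:QI (mem:SI ADDR) 0) is rewritten as
   (mem:QI ADDR); for a nonzero BYTE, adjust_address_nv folds the offset
   into the address instead.  */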
5846 /* Handle complex values represented as CONCAT
5847 of real and imaginary part. */
5848 if (GET_CODE (op) == CONCAT)
5850 unsigned int part_size, final_offset;
5851 rtx part, res;
5853 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5854 if (byte < part_size)
5856 part = XEXP (op, 0);
5857 final_offset = byte;
5859 else
5861 part = XEXP (op, 1);
5862 final_offset = byte - part_size;
5865 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5866 return NULL_RTX;
5868 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5869 if (res)
5870 return res;
5871 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5872 return gen_rtx_SUBREG (outermode, part, final_offset);
5873 return NULL_RTX;
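/* Illustrative example, not part of the original file: with 4-byte SFmode
   parts, (subreg:SF (concat:SC X Y) 0) simplifies to X and
   (subreg:SF (concat:SC X Y) 4) simplifies to Y.  */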
5876 /* A SUBREG resulting from a zero extension may fold to zero if
5877 it extracts higher bits than the ZERO_EXTEND's source provides. */
5878 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5880 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5881 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5882 return CONST0_RTX (outermode);
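/* Illustrative example, not part of the original file (assumes a
   little-endian target): (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads
   bits 32..63, which the ZERO_EXTEND guarantees are zero, so it folds to
   (const_int 0).  */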
5885 if (SCALAR_INT_MODE_P (outermode)
5886 && SCALAR_INT_MODE_P (innermode)
5887 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5888 && byte == subreg_lowpart_offset (outermode, innermode))
5890 rtx tem = simplify_truncation (outermode, op, innermode);
5891 if (tem)
5892 return tem;
5895 return NULL_RTX;
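/* Illustrative note, not part of the original file: the final case in
   simplify_subreg above treats a lowpart subreg, e.g.
   (subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32)) 0) on a little-endian
   target, as if it were (truncate:SI ...), giving the TRUNCATE rules in
   simplify_truncation a chance to apply.  */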
5898 /* Make a SUBREG operation or equivalent if it folds. */
5901 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5902 enum machine_mode innermode, unsigned int byte)
5904 rtx newx;
5906 newx = simplify_subreg (outermode, op, innermode, byte);
5907 if (newx)
5908 return newx;
5910 if (GET_CODE (op) == SUBREG
5911 || GET_CODE (op) == CONCAT
5912 || GET_MODE (op) == VOIDmode)
5913 return NULL_RTX;
5915 if (validate_subreg (outermode, innermode, op, byte))
5916 return gen_rtx_SUBREG (outermode, op, byte);
5918 return NULL_RTX;
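/* Illustrative sketch only, not part of the original file: a hypothetical
   helper (`extract_simode_lowpart' is an invented name) showing how a
   caller might combine simplify_gen_subreg with subreg_lowpart_offset to
   take the low SImode part of a DImode value, getting NULL_RTX back when
   no SUBREG can be formed.  */
static rtx
extract_simode_lowpart (rtx op)
{
  /* subreg_lowpart_offset yields byte 0 on little-endian targets and the
     appropriate nonzero byte on big-endian ones.  */
  return simplify_gen_subreg (SImode, op, DImode,
                              subreg_lowpart_offset (SImode, DImode));
}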
5921 /* Simplify X, an rtx expression.
5923 Return the simplified expression or NULL if no simplifications
5924 were possible.
5926 This is the preferred entry point into the simplification routines;
5927 however, we still allow passes to call the more specific routines.
5929 Right now GCC has three (yes, three) major bodies of RTL simplification
5930 code that need to be unified.
5932 1. fold_rtx in cse.c. This code uses various CSE specific
5933 information to aid in RTL simplification.
5935 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5936 it uses combine specific information to aid in RTL
5937 simplification.
5939 3. The routines in this file.
5942 Long term we want to have only one body of simplification code; to
5943 get to that state I recommend the following steps:
5945 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5946 which are not tied to pass dependent state into these routines.
5948 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5949 use this routine whenever possible.
5951 3. Allow for pass dependent state to be provided to these
5952 routines and add simplifications based on the pass dependent
5953 state. Remove code from cse.c & combine.c that becomes
5954 redundant/dead.
5956 It will take time, but ultimately the compiler will be easier to
5957 maintain and improve. It's totally silly that when we add a
5958 simplification it needs to be added to 4 places (3 for RTL
5959 simplification and 1 for tree simplification). */
5962 simplify_rtx (const_rtx x)
5964 const enum rtx_code code = GET_CODE (x);
5965 const enum machine_mode mode = GET_MODE (x);
5967 switch (GET_RTX_CLASS (code))
5969 case RTX_UNARY:
5970 return simplify_unary_operation (code, mode,
5971 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5972 case RTX_COMM_ARITH:
5973 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5974 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5976 /* Fall through.... */
5978 case RTX_BIN_ARITH:
5979 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5981 case RTX_TERNARY:
5982 case RTX_BITFIELD_OPS:
5983 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5984 XEXP (x, 0), XEXP (x, 1),
5985 XEXP (x, 2));
5987 case RTX_COMPARE:
5988 case RTX_COMM_COMPARE:
5989 return simplify_relational_operation (code, mode,
5990 ((GET_MODE (XEXP (x, 0))
5991 != VOIDmode)
5992 ? GET_MODE (XEXP (x, 0))
5993 : GET_MODE (XEXP (x, 1))),
5994 XEXP (x, 0),
5995 XEXP (x, 1));
5997 case RTX_EXTRA:
5998 if (code == SUBREG)
5999 return simplify_subreg (mode, SUBREG_REG (x),
6000 GET_MODE (SUBREG_REG (x)),
6001 SUBREG_BYTE (x));
6002 break;
6004 case RTX_OBJ:
6005 if (code == LO_SUM)
6007 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6008 if (GET_CODE (XEXP (x, 0)) == HIGH
6009 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6010 return XEXP (x, 1);
6012 break;
6014 default:
6015 break;
6017 return NULL;
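/* Illustrative sketch only, not part of the original file: a hypothetical
   wrapper (`simplify_or_original' is an invented name).  simplify_rtx
   returns NULL when no simplification applies, so callers typically fall
   back to the original expression.  */
static rtx
simplify_or_original (rtx x)
{
  rtx tem = simplify_rtx (x);
  return tem ? tem : x;
}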