1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
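/* Editorial example (not in the original source): with a 64-bit
   HOST_WIDE_INT, the 128-bit value -1 is stored as the pair
   low = 0xffffffffffffffff, high = -1; HWI_SIGN_EXTEND (low) recomputes
   that high word as (HOST_WIDE_INT) -1, whereas for low = 5 it yields 0. */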
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
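/* Worked example (editorial, not in the original source): for SImode the
   most negative value has no positive counterpart, so the negation is done
   in unsigned arithmetic and then truncated by gen_int_mode; negating
   (const_int -2147483648) therefore yields (const_int -2147483648) again. */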
72 /* Test whether the expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
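/* Editorial example: for QImode (precision 8) the only accepted value is
   the one whose low eight bits are 0x80, i.e. (const_int -128); values
   such as (const_int 127) or (const_int 1) are rejected. */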
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 unsigned int width;
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 unsigned int width;
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
170 rtx tem;
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
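/* Usage sketch (editorial illustration; REG1 stands for any non-constant
   SImode rtx):

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       yields (const_int 5), folded outright;
     simplify_gen_binary (PLUS, SImode, GEN_INT (1), reg1)
       yields (plus:SI reg1 (const_int 1)), with the constant placed
       second in the canonical commutative order. */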
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
188 avoid_constant_pool_reference (rtx x)
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
194 switch (GET_CODE (x))
196 case MEM:
197 break;
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
205 REAL_VALUE_TYPE d;
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 return x;
212 default:
213 return x;
216 if (GET_MODE (x) == BLKmode)
217 return x;
219 addr = XEXP (x, 0);
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
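/* Editorial example: if X is (mem:DF (symbol_ref ...)) whose SYMBOL_REF
   satisfies CONSTANT_POOL_ADDRESS_P and whose pool entry is the DFmode
   constant 1.5, the call returns that CONST_DOUBLE directly, so later
   folding can treat the load as a constant. */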
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do so if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies the mode in which the comparison is done. */
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X; if it returns non-NULL, replace X with its return value and simplify the
411 result. */
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
510 break;
512 default:
513 break;
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
530 if (newvec == vec)
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
537 RTVEC_ELT (newvec, j) = op;
540 break;
542 case 'e':
543 if (XEXP (x, i))
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
553 break;
555 return newx;
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
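/* Usage sketch (editorial): replacing (reg:SI R) by (const_int 2) inside
   (plus:SI (reg:SI R) (const_int 3)) does not merely substitute; the
   rebuilt PLUS is passed back through simplify_gen_binary, so the result
   is (const_int 5). */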
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568 Only handle cases where the truncated value is inherently an rvalue.
570 RTL provides two ways of truncating a value:
572 1. a lowpart subreg. This form is only a truncation when both
573 the outer and inner modes (here MODE and OP_MODE respectively)
574 are scalar integers, and only then when the subreg is used as
575 an rvalue.
577 It is only valid to form such truncating subregs if the
578 truncation requires no action by the target. The onus for
579 proving this is on the creator of the subreg -- e.g. the
580 caller to simplify_subreg or simplify_gen_subreg -- and typically
581 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
583 2. a TRUNCATE. This form handles both scalar and compound integers.
585 The first form is preferred where valid. However, the TRUNCATE
586 handling in simplify_unary_operation turns the second form into the
587 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588 so it is generally safe to form rvalue truncations using:
590 simplify_gen_unary (TRUNCATE, ...)
592 and leave simplify_unary_operation to work out which representation
593 should be used.
595 Because of the proof requirements on (1), simplify_truncation must
596 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597 regardless of whether the outer truncation came from a SUBREG or a
598 TRUNCATE. For example, if the caller has proven that an SImode
599 truncation of:
601 (and:DI X Y)
603 is a no-op and can be represented as a subreg, it does not follow
604 that SImode truncations of X and Y are also no-ops. On a target
605 like 64-bit MIPS that requires SImode values to be stored in
606 sign-extended form, an SImode truncation of:
608 (and:DI (reg:DI X) (const_int 63))
610 is trivially a no-op because only the lower 6 bits can be set.
611 However, X is still an arbitrary 64-bit number and so we cannot
612 assume that truncating it too is a no-op. */
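/* Usage sketch (editorial; X stands for an arbitrary DImode rvalue): a
   caller wanting the SImode low part of X as an rvalue would normally write

     rtx lo = simplify_gen_unary (TRUNCATE, SImode, x, DImode);

   and let simplify_unary_operation decide whether the result may be
   represented as a lowpart subreg instead of an explicit TRUNCATE. */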
614 static rtx
615 simplify_truncation (enum machine_mode mode, rtx op,
616 enum machine_mode op_mode)
618 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
619 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
620 gcc_assert (precision <= op_precision);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the truncation.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if MODE is wider
630 than the origmode, we can just extend to the appropriate
631 mode. */
632 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
643 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
644 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
645 if (GET_CODE (op) == PLUS
646 || GET_CODE (op) == MINUS
647 || GET_CODE (op) == MULT)
649 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
650 if (op0)
652 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
653 if (op1)
654 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
658 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
659 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
660 the outer subreg is effectively a truncation to the original mode. */
661 if ((GET_CODE (op) == LSHIFTRT
662 || GET_CODE (op) == ASHIFTRT)
663 /* Ensure that OP_MODE is at least twice as wide as MODE
664 to avoid the possibility that an outer LSHIFTRT shifts by more
665 than the sign extension's sign_bit_copies and introduces zeros
666 into the high bits of the result. */
667 && 2 * precision <= op_precision
668 && CONST_INT_P (XEXP (op, 1))
669 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
670 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
671 && UINTVAL (XEXP (op, 1)) < precision)
672 return simplify_gen_binary (ASHIFTRT, mode,
673 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
675 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
676 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op) == LSHIFTRT
679 || GET_CODE (op) == ASHIFTRT)
680 && CONST_INT_P (XEXP (op, 1))
681 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
682 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
683 && UINTVAL (XEXP (op, 1)) < precision)
684 return simplify_gen_binary (LSHIFTRT, mode,
685 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
687 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
688 (ashift:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if (GET_CODE (op) == ASHIFT
691 && CONST_INT_P (XEXP (op, 1))
692 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
693 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
694 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
695 && UINTVAL (XEXP (op, 1)) < precision)
696 return simplify_gen_binary (ASHIFT, mode,
697 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
699 /* Recognize a word extraction from a multi-word subreg. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && SCALAR_INT_MODE_P (mode)
703 && SCALAR_INT_MODE_P (op_mode)
704 && precision >= BITS_PER_WORD
705 && 2 * precision <= op_precision
706 && CONST_INT_P (XEXP (op, 1))
707 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
708 && UINTVAL (XEXP (op, 1)) < op_precision)
710 int byte = subreg_lowpart_offset (mode, op_mode);
711 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
712 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
713 (WORDS_BIG_ENDIAN
714 ? byte - shifted_bytes
715 : byte + shifted_bytes));
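/* Worked example (editorial), assuming a 32-bit little-endian target with
   32-bit words: (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32)))
   has byte == 0 and shifted_bytes == 4, so it becomes
   (subreg:SI (reg:DI X) 4), i.e. the high word of X. */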
718 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
719 and try replacing the TRUNCATE and shift with it. Don't do this
720 if the MEM has a mode-dependent address. */
721 if ((GET_CODE (op) == LSHIFTRT
722 || GET_CODE (op) == ASHIFTRT)
723 && SCALAR_INT_MODE_P (op_mode)
724 && MEM_P (XEXP (op, 0))
725 && CONST_INT_P (XEXP (op, 1))
726 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
727 && INTVAL (XEXP (op, 1)) > 0
728 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
729 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
730 MEM_ADDR_SPACE (XEXP (op, 0)))
731 && ! MEM_VOLATILE_P (XEXP (op, 0))
732 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
733 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
735 int byte = subreg_lowpart_offset (mode, op_mode);
736 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
737 return adjust_address_nv (XEXP (op, 0), mode,
738 (WORDS_BIG_ENDIAN
739 ? byte - shifted_bytes
740 : byte + shifted_bytes));
743 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
744 (OP:SI foo:SI) if OP is NEG or ABS. */
745 if ((GET_CODE (op) == ABS
746 || GET_CODE (op) == NEG)
747 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
748 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
749 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
750 return simplify_gen_unary (GET_CODE (op), mode,
751 XEXP (XEXP (op, 0), 0), mode);
753 /* (truncate:A (subreg:B (truncate:C X) 0)) is
754 (truncate:A X). */
755 if (GET_CODE (op) == SUBREG
756 && SCALAR_INT_MODE_P (mode)
757 && SCALAR_INT_MODE_P (op_mode)
758 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
759 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
760 && subreg_lowpart_p (op))
761 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
762 GET_MODE (XEXP (SUBREG_REG (op), 0)));
764 /* (truncate:A (truncate:B X)) is (truncate:A X). */
765 if (GET_CODE (op) == TRUNCATE)
766 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
767 GET_MODE (XEXP (op, 0)));
769 return NULL_RTX;
772 /* Try to simplify a unary operation CODE whose output mode is to be
773 MODE with input operand OP whose mode was originally OP_MODE.
774 Return zero if no simplification can be made. */
776 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
777 rtx op, enum machine_mode op_mode)
779 rtx trueop, tem;
781 trueop = avoid_constant_pool_reference (op);
783 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
784 if (tem)
785 return tem;
787 return simplify_unary_operation_1 (code, mode, op);
790 /* Perform some simplifications we can do even if the operands
791 aren't constant. */
792 static rtx
793 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
795 enum rtx_code reversed;
796 rtx temp;
798 switch (code)
800 case NOT:
801 /* (not (not X)) == X. */
802 if (GET_CODE (op) == NOT)
803 return XEXP (op, 0);
805 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
806 comparison is all ones. */
807 if (COMPARISON_P (op)
808 && (mode == BImode || STORE_FLAG_VALUE == -1)
809 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
810 return simplify_gen_relational (reversed, mode, VOIDmode,
811 XEXP (op, 0), XEXP (op, 1));
813 /* (not (plus X -1)) can become (neg X). */
814 if (GET_CODE (op) == PLUS
815 && XEXP (op, 1) == constm1_rtx)
816 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
818 /* Similarly, (not (neg X)) is (plus X -1). */
819 if (GET_CODE (op) == NEG)
820 return plus_constant (mode, XEXP (op, 0), -1);
822 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
823 if (GET_CODE (op) == XOR
824 && CONST_INT_P (XEXP (op, 1))
825 && (temp = simplify_unary_operation (NOT, mode,
826 XEXP (op, 1), mode)) != 0)
827 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
829 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
830 if (GET_CODE (op) == PLUS
831 && CONST_INT_P (XEXP (op, 1))
832 && mode_signbit_p (mode, XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
838 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
839 operands other than 1, but that is not valid. We could do a
840 similar simplification for (not (lshiftrt C X)) where C is
841 just the sign bit, but this doesn't seem common enough to
842 bother with. */
843 if (GET_CODE (op) == ASHIFT
844 && XEXP (op, 0) == const1_rtx)
846 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
847 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
850 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
851 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
852 so we can perform the above simplification. */
854 if (STORE_FLAG_VALUE == -1
855 && GET_CODE (op) == ASHIFTRT
856 && CONST_INT_P (XEXP (op, 1))
857 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
858 return simplify_gen_relational (GE, mode, VOIDmode,
859 XEXP (op, 0), const0_rtx);
862 if (GET_CODE (op) == SUBREG
863 && subreg_lowpart_p (op)
864 && (GET_MODE_SIZE (GET_MODE (op))
865 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
866 && GET_CODE (SUBREG_REG (op)) == ASHIFT
867 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
869 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
870 rtx x;
872 x = gen_rtx_ROTATE (inner_mode,
873 simplify_gen_unary (NOT, inner_mode, const1_rtx,
874 inner_mode),
875 XEXP (SUBREG_REG (op), 1));
876 return rtl_hooks.gen_lowpart_no_emit (mode, x);
879 /* Apply De Morgan's laws to reduce number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
882 coded. */
884 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
886 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
887 enum machine_mode op_mode;
889 op_mode = GET_MODE (in1);
890 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
892 op_mode = GET_MODE (in2);
893 if (op_mode == VOIDmode)
894 op_mode = mode;
895 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
897 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
899 rtx tem = in2;
900 in2 = in1; in1 = tem;
903 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
904 mode, in1, in2);
906 break;
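/* Worked example (editorial): (not:SI (and:SI (reg A) (not:SI (reg B))))
   becomes (ior:SI (not:SI (reg A)) (reg B)); the inner NOT of B cancels
   via the (not (not X)) rule above and the remaining NOT ends up first. */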
908 case NEG:
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op) == NEG)
911 return XEXP (op, 0);
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op) == PLUS
915 && XEXP (op, 1) == const1_rtx)
916 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op) == NOT)
920 return plus_constant (mode, XEXP (op, 0), 1);
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is +0 while (neg (minus X Y)) is -0. If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
930 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
932 if (GET_CODE (op) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_INT_P (XEXP (op, 1))
938 || CONST_DOUBLE_P (XEXP (op, 1)))
940 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
941 if (temp)
942 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
947 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
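/* Worked examples (editorial): (neg:SI (plus:SI (reg X) (const_int 5)))
   becomes (minus:SI (const_int -5) (reg X)), while with a non-constant
   second operand (neg:SI (plus:SI (reg X) (reg Y))) is canonicalized to
   (minus:SI (neg:SI (reg X)) (reg Y)). */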
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
955 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
956 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
961 is a constant). */
962 if (GET_CODE (op) == ASHIFT)
964 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
965 if (temp)
966 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op) == ASHIFTRT
972 && CONST_INT_P (XEXP (op, 1))
973 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
974 return simplify_gen_binary (LSHIFTRT, mode,
975 XEXP (op, 0), XEXP (op, 1));
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op) == LSHIFTRT
980 && CONST_INT_P (XEXP (op, 1))
981 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
982 return simplify_gen_binary (ASHIFTRT, mode,
983 XEXP (op, 0), XEXP (op, 1));
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op) == XOR
987 && XEXP (op, 1) == const1_rtx
988 && nonzero_bits (XEXP (op, 0), mode) == 1)
989 return plus_constant (mode, XEXP (op, 0), -1);
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1, and
992 (lshiftrt X C) if STORE_FLAG_VALUE is -1, where C is one less than
the precision of X's mode. */
993 if (GET_CODE (op) == LT
994 && XEXP (op, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
997 enum machine_mode inner = GET_MODE (XEXP (op, 0));
998 int isize = GET_MODE_PRECISION (inner);
999 if (STORE_FLAG_VALUE == 1)
1001 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1002 GEN_INT (isize - 1));
1003 if (mode == inner)
1004 return temp;
1005 if (GET_MODE_PRECISION (mode) > isize)
1006 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1007 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1009 else if (STORE_FLAG_VALUE == -1)
1011 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1020 break;
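/* Worked example (editorial): with STORE_FLAG_VALUE == 1 and X in SImode,
   (neg:SI (lt:SI (reg X) (const_int 0))) becomes
   (ashiftrt:SI (reg X) (const_int 31)), which is -1 exactly when X is
   negative and 0 otherwise. */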
1022 case TRUNCATE:
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op) == LSHIFTRT
1026 && GET_CODE (XEXP (op, 0)) == MULT)
1027 break;
1029 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1032 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1033 /* We can't handle truncation to a partial integer mode here
1034 because we don't know the real bitsize of the partial
1035 integer mode. */
1036 break;
1039 if (GET_MODE (op) != VOIDmode)
1041 temp = simplify_truncation (mode, op, GET_MODE (op));
1042 if (temp)
1043 return temp;
1046 /* If we know that the value is already truncated, we can
1047 replace the TRUNCATE with a SUBREG. */
1048 if (GET_MODE_NUNITS (mode) == 1
1049 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1050 || truncated_to_mode (mode, op)))
1051 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1053 /* A truncate of a comparison can be replaced with a subreg if
1054 STORE_FLAG_VALUE permits. This is like the previous test,
1055 but it works even if the comparison is done in a mode larger
1056 than HOST_BITS_PER_WIDE_INT. */
1057 if (HWI_COMPUTABLE_MODE_P (mode)
1058 && COMPARISON_P (op)
1059 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1060 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1062 /* A truncate of a memory is just loading the low part of the memory
1063 if we are not changing the meaning of the address. */
1064 if (GET_CODE (op) == MEM
1065 && !VECTOR_MODE_P (mode)
1066 && !MEM_VOLATILE_P (op)
1067 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1068 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1070 break;
1072 case FLOAT_TRUNCATE:
1073 if (DECIMAL_FLOAT_MODE_P (mode))
1074 break;
1076 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1077 if (GET_CODE (op) == FLOAT_EXTEND
1078 && GET_MODE (XEXP (op, 0)) == mode)
1079 return XEXP (op, 0);
1081 /* (float_truncate:SF (float_truncate:DF foo:XF))
1082 = (float_truncate:SF foo:XF).
1083 This may eliminate double rounding, so it is done only under -funsafe-math-optimizations.
1085 (float_truncate:SF (float_extend:XF foo:DF))
1086 = (float_truncate:SF foo:DF).
1088 (float_truncate:DF (float_extend:XF foo:SF))
1089 = (float_extend:DF foo:SF). */
1090 if ((GET_CODE (op) == FLOAT_TRUNCATE
1091 && flag_unsafe_math_optimizations)
1092 || GET_CODE (op) == FLOAT_EXTEND)
1093 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1094 0)))
1095 > GET_MODE_SIZE (mode)
1096 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1097 mode,
1098 XEXP (op, 0), mode);
1100 /* (float_truncate (float x)) is (float x) */
1101 if (GET_CODE (op) == FLOAT
1102 && (flag_unsafe_math_optimizations
1103 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1104 && ((unsigned)significand_size (GET_MODE (op))
1105 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1106 - num_sign_bit_copies (XEXP (op, 0),
1107 GET_MODE (XEXP (op, 0))))))))
1108 return simplify_gen_unary (FLOAT, mode,
1109 XEXP (op, 0),
1110 GET_MODE (XEXP (op, 0)));
1112 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1113 (OP:SF foo:SF) if OP is NEG or ABS. */
1114 if ((GET_CODE (op) == ABS
1115 || GET_CODE (op) == NEG)
1116 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1117 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1118 return simplify_gen_unary (GET_CODE (op), mode,
1119 XEXP (XEXP (op, 0), 0), mode);
1121 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1122 is (float_truncate:SF x). */
1123 if (GET_CODE (op) == SUBREG
1124 && subreg_lowpart_p (op)
1125 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1126 return SUBREG_REG (op);
1127 break;
1129 case FLOAT_EXTEND:
1130 if (DECIMAL_FLOAT_MODE_P (mode))
1131 break;
1133 /* (float_extend (float_extend x)) is (float_extend x)
1135 (float_extend (float x)) is (float x) assuming that double
1136 rounding can't happen. */
1138 if (GET_CODE (op) == FLOAT_EXTEND
1139 || (GET_CODE (op) == FLOAT
1140 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1141 && ((unsigned)significand_size (GET_MODE (op))
1142 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1143 - num_sign_bit_copies (XEXP (op, 0),
1144 GET_MODE (XEXP (op, 0)))))))
1145 return simplify_gen_unary (GET_CODE (op), mode,
1146 XEXP (op, 0),
1147 GET_MODE (XEXP (op, 0)));
1149 break;
1151 case ABS:
1152 /* (abs (neg <foo>)) -> (abs <foo>) */
1153 if (GET_CODE (op) == NEG)
1154 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1155 GET_MODE (XEXP (op, 0)));
1157 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1158 do nothing. */
1159 if (GET_MODE (op) == VOIDmode)
1160 break;
1162 /* If operand is something known to be positive, ignore the ABS. */
1163 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1164 || val_signbit_known_clear_p (GET_MODE (op),
1165 nonzero_bits (op, GET_MODE (op))))
1166 return op;
1168 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1169 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1170 return gen_rtx_NEG (mode, op);
1172 break;
1174 case FFS:
1175 /* (ffs (*_extend <X>)) = (ffs <X>) */
1176 if (GET_CODE (op) == SIGN_EXTEND
1177 || GET_CODE (op) == ZERO_EXTEND)
1178 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1179 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case POPCOUNT:
1183 switch (GET_CODE (op))
1185 case BSWAP:
1186 case ZERO_EXTEND:
1187 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1188 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1189 GET_MODE (XEXP (op, 0)));
1191 case ROTATE:
1192 case ROTATERT:
1193 /* Rotations don't affect popcount. */
1194 if (!side_effects_p (XEXP (op, 1)))
1195 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1196 GET_MODE (XEXP (op, 0)));
1197 break;
1199 default:
1200 break;
1202 break;
1204 case PARITY:
1205 switch (GET_CODE (op))
1207 case NOT:
1208 case BSWAP:
1209 case ZERO_EXTEND:
1210 case SIGN_EXTEND:
1211 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1214 case ROTATE:
1215 case ROTATERT:
1216 /* Rotations don't affect parity. */
1217 if (!side_effects_p (XEXP (op, 1)))
1218 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1220 break;
1222 default:
1223 break;
1225 break;
1227 case BSWAP:
1228 /* (bswap (bswap x)) -> x. */
1229 if (GET_CODE (op) == BSWAP)
1230 return XEXP (op, 0);
1231 break;
1233 case FLOAT:
1234 /* (float (sign_extend <X>)) = (float <X>). */
1235 if (GET_CODE (op) == SIGN_EXTEND)
1236 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1238 break;
1240 case SIGN_EXTEND:
1241 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1242 becomes just the MINUS if its mode is MODE. This allows
1243 folding switch statements on machines using casesi (such as
1244 the VAX). */
1245 if (GET_CODE (op) == TRUNCATE
1246 && GET_MODE (XEXP (op, 0)) == mode
1247 && GET_CODE (XEXP (op, 0)) == MINUS
1248 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1249 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1250 return XEXP (op, 0);
1252 /* Extending a widening multiplication should be canonicalized to
1253 a wider widening multiplication. */
1254 if (GET_CODE (op) == MULT)
1256 rtx lhs = XEXP (op, 0);
1257 rtx rhs = XEXP (op, 1);
1258 enum rtx_code lcode = GET_CODE (lhs);
1259 enum rtx_code rcode = GET_CODE (rhs);
1261 /* Widening multiplies usually extend both operands, but sometimes
1262 they use a shift to extract a portion of a register. */
1263 if ((lcode == SIGN_EXTEND
1264 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1265 && (rcode == SIGN_EXTEND
1266 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1268 enum machine_mode lmode = GET_MODE (lhs);
1269 enum machine_mode rmode = GET_MODE (rhs);
1270 int bits;
1272 if (lcode == ASHIFTRT)
1273 /* Number of bits not shifted off the end. */
1274 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1275 else /* lcode == SIGN_EXTEND */
1276 /* Size of inner mode. */
1277 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1279 if (rcode == ASHIFTRT)
1280 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1281 else /* rcode == SIGN_EXTEND */
1282 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1284 /* We can only widen multiplies if the result is mathematically
1285 equivalent. I.e. if overflow was impossible. */
1286 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1287 return simplify_gen_binary
1288 (MULT, mode,
1289 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1290 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
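/* Worked example (editorial):
   (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
                            (sign_extend:SI (reg:HI B))))
   has bits == 16 + 16 == 32 <= 32, so overflow is impossible and it is
   canonicalized to
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))). */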
1294 /* Check for a sign extension of a subreg of a promoted
1295 variable, where the promotion is sign-extended, and the
1296 target mode is the same as the variable's promotion. */
1297 if (GET_CODE (op) == SUBREG
1298 && SUBREG_PROMOTED_VAR_P (op)
1299 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1300 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1301 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1303 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1304 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1305 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1307 gcc_assert (GET_MODE_BITSIZE (mode)
1308 > GET_MODE_BITSIZE (GET_MODE (op)));
1309 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1313 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1314 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1315 GET_MODE_BITSIZE (N) - I bits.
1316 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1317 is similarly (zero_extend:M (subreg:O <X>)). */
1318 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1319 && GET_CODE (XEXP (op, 0)) == ASHIFT
1320 && CONST_INT_P (XEXP (op, 1))
1321 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1322 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1324 enum machine_mode tmode
1325 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1326 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1327 gcc_assert (GET_MODE_BITSIZE (mode)
1328 > GET_MODE_BITSIZE (GET_MODE (op)));
1329 if (tmode != BLKmode)
1331 rtx inner =
1332 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1333 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1334 ? SIGN_EXTEND : ZERO_EXTEND,
1335 mode, inner, tmode);
1339 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1340 /* As we do not know which address space the pointer is referring to,
1341 we can do this only if the target does not support different pointer
1342 or address modes depending on the address space. */
1343 if (target_default_pointer_address_modes_p ()
1344 && ! POINTERS_EXTEND_UNSIGNED
1345 && mode == Pmode && GET_MODE (op) == ptr_mode
1346 && (CONSTANT_P (op)
1347 || (GET_CODE (op) == SUBREG
1348 && REG_P (SUBREG_REG (op))
1349 && REG_POINTER (SUBREG_REG (op))
1350 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1351 return convert_memory_address (Pmode, op);
1352 #endif
1353 break;
1355 case ZERO_EXTEND:
1356 /* Check for a zero extension of a subreg of a promoted
1357 variable, where the promotion is zero-extended, and the
1358 target mode is the same as the variable's promotion. */
1359 if (GET_CODE (op) == SUBREG
1360 && SUBREG_PROMOTED_VAR_P (op)
1361 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1362 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1363 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1365 /* Extending a widening multiplication should be canonicalized to
1366 a wider widening multiplication. */
1367 if (GET_CODE (op) == MULT)
1369 rtx lhs = XEXP (op, 0);
1370 rtx rhs = XEXP (op, 1);
1371 enum rtx_code lcode = GET_CODE (lhs);
1372 enum rtx_code rcode = GET_CODE (rhs);
1374 /* Widening multiplies usually extend both operands, but sometimes
1375 they use a shift to extract a portion of a register. */
1376 if ((lcode == ZERO_EXTEND
1377 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1378 && (rcode == ZERO_EXTEND
1379 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1381 enum machine_mode lmode = GET_MODE (lhs);
1382 enum machine_mode rmode = GET_MODE (rhs);
1383 int bits;
1385 if (lcode == LSHIFTRT)
1386 /* Number of bits not shifted off the end. */
1387 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1388 else /* lcode == ZERO_EXTEND */
1389 /* Size of inner mode. */
1390 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1392 if (rcode == LSHIFTRT)
1393 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1394 else /* rcode == ZERO_EXTEND */
1395 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1397 /* We can only widen multiplies if the result is mathematically
1398 equivalent. I.e. if overflow was impossible. */
1399 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1400 return simplify_gen_binary
1401 (MULT, mode,
1402 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1403 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1407 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1408 if (GET_CODE (op) == ZERO_EXTEND)
1409 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1410 GET_MODE (XEXP (op, 0)));
1412 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1413 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1414 GET_MODE_BITSIZE (N) - I bits. */
1415 if (GET_CODE (op) == LSHIFTRT
1416 && GET_CODE (XEXP (op, 0)) == ASHIFT
1417 && CONST_INT_P (XEXP (op, 1))
1418 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1419 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1421 enum machine_mode tmode
1422 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1423 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1424 if (tmode != BLKmode)
1426 rtx inner =
1427 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1428 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1432 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1433 /* As we do not know which address space the pointer is referring to,
1434 we can do this only if the target does not support different pointer
1435 or address modes depending on the address space. */
1436 if (target_default_pointer_address_modes_p ()
1437 && POINTERS_EXTEND_UNSIGNED > 0
1438 && mode == Pmode && GET_MODE (op) == ptr_mode
1439 && (CONSTANT_P (op)
1440 || (GET_CODE (op) == SUBREG
1441 && REG_P (SUBREG_REG (op))
1442 && REG_POINTER (SUBREG_REG (op))
1443 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1444 return convert_memory_address (Pmode, op);
1445 #endif
1446 break;
1448 default:
1449 break;
1452 return 0;
1455 /* Try to compute the value of a unary operation CODE whose output mode is to
1456 be MODE with input operand OP whose mode was originally OP_MODE.
1457 Return zero if the value cannot be computed. */
1459 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1460 rtx op, enum machine_mode op_mode)
1462 unsigned int width = GET_MODE_PRECISION (mode);
1463 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1465 if (code == VEC_DUPLICATE)
1467 gcc_assert (VECTOR_MODE_P (mode));
1468 if (GET_MODE (op) != VOIDmode)
1470 if (!VECTOR_MODE_P (GET_MODE (op)))
1471 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1472 else
1473 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1474 (GET_MODE (op)));
1476 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1477 || GET_CODE (op) == CONST_VECTOR)
1479 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1480 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1481 rtvec v = rtvec_alloc (n_elts);
1482 unsigned int i;
1484 if (GET_CODE (op) != CONST_VECTOR)
1485 for (i = 0; i < n_elts; i++)
1486 RTVEC_ELT (v, i) = op;
1487 else
1489 enum machine_mode inmode = GET_MODE (op);
1490 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1491 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1493 gcc_assert (in_n_elts < n_elts);
1494 gcc_assert ((n_elts % in_n_elts) == 0);
1495 for (i = 0; i < n_elts; i++)
1496 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1498 return gen_rtx_CONST_VECTOR (mode, v);
1502 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1504 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1505 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1506 enum machine_mode opmode = GET_MODE (op);
1507 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1508 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1509 rtvec v = rtvec_alloc (n_elts);
1510 unsigned int i;
1512 gcc_assert (op_n_elts == n_elts);
1513 for (i = 0; i < n_elts; i++)
1515 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1516 CONST_VECTOR_ELT (op, i),
1517 GET_MODE_INNER (opmode));
1518 if (!x)
1519 return 0;
1520 RTVEC_ELT (v, i) = x;
1522 return gen_rtx_CONST_VECTOR (mode, v);
1525 /* The order of these tests is critical so that, for example, we don't
1526 check the wrong mode (input vs. output) for a conversion operation,
1527 such as FIX. At some point, this should be simplified. */
1529 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1531 HOST_WIDE_INT hv, lv;
1532 REAL_VALUE_TYPE d;
1534 if (CONST_INT_P (op))
1535 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1536 else
1537 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1539 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1540 d = real_value_truncate (mode, d);
1541 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1543 else if (code == UNSIGNED_FLOAT
1544 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1546 HOST_WIDE_INT hv, lv;
1547 REAL_VALUE_TYPE d;
1549 if (CONST_INT_P (op))
1550 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1551 else
1552 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1554 if (op_mode == VOIDmode
1555 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1556 /* We should never get a negative number. */
1557 gcc_assert (hv >= 0);
1558 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1559 hv = 0, lv &= GET_MODE_MASK (op_mode);
1561 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1562 d = real_value_truncate (mode, d);
1563 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1566 if (CONST_INT_P (op)
1567 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1569 HOST_WIDE_INT arg0 = INTVAL (op);
1570 HOST_WIDE_INT val;
1572 switch (code)
1574 case NOT:
1575 val = ~ arg0;
1576 break;
1578 case NEG:
1579 val = - arg0;
1580 break;
1582 case ABS:
1583 val = (arg0 >= 0 ? arg0 : - arg0);
1584 break;
1586 case FFS:
1587 arg0 &= GET_MODE_MASK (mode);
1588 val = ffs_hwi (arg0);
1589 break;
1591 case CLZ:
1592 arg0 &= GET_MODE_MASK (mode);
1593 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1595 else
1596 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1597 break;
1599 case CLRSB:
1600 arg0 &= GET_MODE_MASK (mode);
1601 if (arg0 == 0)
1602 val = GET_MODE_PRECISION (mode) - 1;
1603 else if (arg0 >= 0)
1604 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1605 else if (arg0 < 0)
1606 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1607 break;
1609 case CTZ:
1610 arg0 &= GET_MODE_MASK (mode);
1611 if (arg0 == 0)
1613 /* Even if the value at zero is undefined, we have to come
1614 up with some replacement. Seems good enough. */
1615 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1616 val = GET_MODE_PRECISION (mode);
1618 else
1619 val = ctz_hwi (arg0);
1620 break;
1622 case POPCOUNT:
1623 arg0 &= GET_MODE_MASK (mode);
1624 val = 0;
1625 while (arg0)
1626 val++, arg0 &= arg0 - 1;
1627 break;
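/* Editorial note: the loop above clears the lowest set bit of ARG0 on
   each iteration (arg0 &= arg0 - 1), so it runs once per set bit; e.g.
   for arg0 == 0x29 (binary 101001) it leaves val == 3. */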
1629 case PARITY:
1630 arg0 &= GET_MODE_MASK (mode);
1631 val = 0;
1632 while (arg0)
1633 val++, arg0 &= arg0 - 1;
1634 val &= 1;
1635 break;
1637 case BSWAP:
1639 unsigned int s;
1641 val = 0;
1642 for (s = 0; s < width; s += 8)
1644 unsigned int d = width - s - 8;
1645 unsigned HOST_WIDE_INT byte;
1646 byte = (arg0 >> s) & 0xff;
1647 val |= byte << d;
1650 break;
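/* Worked example (editorial): for a 32-bit mode the loop exchanges bytes
   s = 0, 8, 16, 24 with d = 24, 16, 8, 0, so BSWAP of
   (const_int 0x12345678) folds to (const_int 0x78563412). */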
1652 case TRUNCATE:
1653 val = arg0;
1654 break;
1656 case ZERO_EXTEND:
1657 /* When zero-extending a CONST_INT, we need to know its
1658 original mode. */
1659 gcc_assert (op_mode != VOIDmode);
1660 if (op_width == HOST_BITS_PER_WIDE_INT)
1662 /* If we were really extending the mode,
1663 we would have to distinguish between zero-extension
1664 and sign-extension. */
1665 gcc_assert (width == op_width);
1666 val = arg0;
1668 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1669 val = arg0 & GET_MODE_MASK (op_mode);
1670 else
1671 return 0;
1672 break;
1674 case SIGN_EXTEND:
1675 if (op_mode == VOIDmode)
1676 op_mode = mode;
1677 op_width = GET_MODE_PRECISION (op_mode);
1678 if (op_width == HOST_BITS_PER_WIDE_INT)
1680 /* If we were really extending the mode,
1681 we would have to distinguish between zero-extension
1682 and sign-extension. */
1683 gcc_assert (width == op_width);
1684 val = arg0;
1686 else if (op_width < HOST_BITS_PER_WIDE_INT)
1688 val = arg0 & GET_MODE_MASK (op_mode);
1689 if (val_signbit_known_set_p (op_mode, val))
1690 val |= ~GET_MODE_MASK (op_mode);
1692 else
1693 return 0;
1694 break;
1696 case SQRT:
1697 case FLOAT_EXTEND:
1698 case FLOAT_TRUNCATE:
1699 case SS_TRUNCATE:
1700 case US_TRUNCATE:
1701 case SS_NEG:
1702 case US_NEG:
1703 case SS_ABS:
1704 return 0;
1706 default:
1707 gcc_unreachable ();
1710 return gen_int_mode (val, mode);
1713 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1714 for a DImode operation on a CONST_INT. */
1715 else if (width <= HOST_BITS_PER_DOUBLE_INT
1716 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1718 double_int first, value;
1720 if (CONST_DOUBLE_AS_INT_P (op))
1721 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1722 CONST_DOUBLE_LOW (op));
1723 else
1724 first = double_int::from_shwi (INTVAL (op));
1726 switch (code)
1728 case NOT:
1729 value = ~first;
1730 break;
1732 case NEG:
1733 value = -first;
1734 break;
1736 case ABS:
1737 if (first.is_negative ())
1738 value = -first;
1739 else
1740 value = first;
1741 break;
1743 case FFS:
1744 value.high = 0;
1745 if (first.low != 0)
1746 value.low = ffs_hwi (first.low);
1747 else if (first.high != 0)
1748 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1749 else
1750 value.low = 0;
1751 break;
1753 case CLZ:
1754 value.high = 0;
1755 if (first.high != 0)
1756 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1757 - HOST_BITS_PER_WIDE_INT;
1758 else if (first.low != 0)
1759 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1760 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1761 value.low = GET_MODE_PRECISION (mode);
1762 break;
1764 case CTZ:
1765 value.high = 0;
1766 if (first.low != 0)
1767 value.low = ctz_hwi (first.low);
1768 else if (first.high != 0)
1769 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1770 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1771 value.low = GET_MODE_PRECISION (mode);
1772 break;
1774 case POPCOUNT:
1775 value = double_int_zero;
1776 while (first.low)
1778 value.low++;
1779 first.low &= first.low - 1;
1781 while (first.high)
1783 value.low++;
1784 first.high &= first.high - 1;
1786 break;
1788 case PARITY:
1789 value = double_int_zero;
1790 while (first.low)
1792 value.low++;
1793 first.low &= first.low - 1;
1795 while (first.high)
1797 value.low++;
1798 first.high &= first.high - 1;
1800 value.low &= 1;
1801 break;
1803 case BSWAP:
1805 unsigned int s;
1807 value = double_int_zero;
1808 for (s = 0; s < width; s += 8)
1810 unsigned int d = width - s - 8;
1811 unsigned HOST_WIDE_INT byte;
1813 if (s < HOST_BITS_PER_WIDE_INT)
1814 byte = (first.low >> s) & 0xff;
1815 else
1816 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1818 if (d < HOST_BITS_PER_WIDE_INT)
1819 value.low |= byte << d;
1820 else
1821 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1824 break;
1826 case TRUNCATE:
1827 /* This is just a change-of-mode, so do nothing. */
1828 value = first;
1829 break;
1831 case ZERO_EXTEND:
1832 gcc_assert (op_mode != VOIDmode);
1834 if (op_width > HOST_BITS_PER_WIDE_INT)
1835 return 0;
1837 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1838 break;
1840 case SIGN_EXTEND:
1841 if (op_mode == VOIDmode
1842 || op_width > HOST_BITS_PER_WIDE_INT)
1843 return 0;
1844 else
1846 value.low = first.low & GET_MODE_MASK (op_mode);
1847 if (val_signbit_known_set_p (op_mode, value.low))
1848 value.low |= ~GET_MODE_MASK (op_mode);
1850 value.high = HWI_SIGN_EXTEND (value.low);
1852 break;
1854 case SQRT:
1855 return 0;
1857 default:
1858 return 0;
1861 return immed_double_int_const (value, mode);
1864 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1865 && SCALAR_FLOAT_MODE_P (mode)
1866 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1868 REAL_VALUE_TYPE d, t;
1869 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1871 switch (code)
1873 case SQRT:
1874 if (HONOR_SNANS (mode) && real_isnan (&d))
1875 return 0;
1876 real_sqrt (&t, mode, &d);
1877 d = t;
1878 break;
1879 case ABS:
1880 d = real_value_abs (&d);
1881 break;
1882 case NEG:
1883 d = real_value_negate (&d);
1884 break;
1885 case FLOAT_TRUNCATE:
1886 d = real_value_truncate (mode, d);
1887 break;
1888 case FLOAT_EXTEND:
1889 /* All this does is change the mode, unless the mode class
1890 changes too, in which case a real conversion is needed. */
1891 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1892 real_convert (&d, mode, &d);
1893 break;
1894 case FIX:
1895 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1896 break;
1897 case NOT:
1899 long tmp[4];
1900 int i;
1902 real_to_target (tmp, &d, GET_MODE (op));
1903 for (i = 0; i < 4; i++)
1904 tmp[i] = ~tmp[i];
1905 real_from_target (&d, tmp, mode);
1906 break;
1908 default:
1909 gcc_unreachable ();
1911 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1914 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1915 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1916 && GET_MODE_CLASS (mode) == MODE_INT
1917 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1919 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1920 operators are intentionally left unspecified (to ease implementation
1921 by target backends), for consistency, this routine implements the
1922 same semantics for constant folding as used by the middle-end. */
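/* For example, folding (fix:SI (const_double:DF 1.0e10)) clamps the
   out-of-range value to the largest signed SImode integer,
   0x7fffffff, and a NaN operand folds to zero.  */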
1924 /* This was formerly used only for non-IEEE float.
1925 eggert@twinsun.com says it is safe for IEEE also. */
1926 HOST_WIDE_INT xh, xl, th, tl;
1927 REAL_VALUE_TYPE x, t;
1928 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1929 switch (code)
1931 case FIX:
1932 if (REAL_VALUE_ISNAN (x))
1933 return const0_rtx;
1935 /* Test against the signed upper bound. */
1936 if (width > HOST_BITS_PER_WIDE_INT)
1938 th = ((unsigned HOST_WIDE_INT) 1
1939 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1940 tl = -1;
1942 else
1944 th = 0;
1945 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1947 real_from_integer (&t, VOIDmode, tl, th, 0);
1948 if (REAL_VALUES_LESS (t, x))
1950 xh = th;
1951 xl = tl;
1952 break;
1955 /* Test against the signed lower bound. */
1956 if (width > HOST_BITS_PER_WIDE_INT)
1958 th = (unsigned HOST_WIDE_INT) (-1)
1959 << (width - HOST_BITS_PER_WIDE_INT - 1);
1960 tl = 0;
1962 else
1964 th = -1;
1965 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1967 real_from_integer (&t, VOIDmode, tl, th, 0);
1968 if (REAL_VALUES_LESS (x, t))
1970 xh = th;
1971 xl = tl;
1972 break;
1974 REAL_VALUE_TO_INT (&xl, &xh, x);
1975 break;
1977 case UNSIGNED_FIX:
1978 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1979 return const0_rtx;
1981 /* Test against the unsigned upper bound. */
1982 if (width == HOST_BITS_PER_DOUBLE_INT)
1984 th = -1;
1985 tl = -1;
1987 else if (width >= HOST_BITS_PER_WIDE_INT)
1989 th = ((unsigned HOST_WIDE_INT) 1
1990 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1991 tl = -1;
1993 else
1995 th = 0;
1996 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1998 real_from_integer (&t, VOIDmode, tl, th, 1);
1999 if (REAL_VALUES_LESS (t, x))
2001 xh = th;
2002 xl = tl;
2003 break;
2006 REAL_VALUE_TO_INT (&xl, &xh, x);
2007 break;
2009 default:
2010 gcc_unreachable ();
2012 return immed_double_const (xl, xh, mode);
2015 return NULL_RTX;
2018 /* Subroutine of simplify_binary_operation to simplify a commutative,
2019 associative binary operation CODE with result mode MODE, operating
2020 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2021 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2022 canonicalization is possible. */
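/* For example, (plus (plus x (const_int 4)) y) is canonicalized to
   (plus (plus x y) (const_int 4)); a nested operation on the
   right-hand side, as in "a op (b op c)", is first brought to the
   left.  */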
2024 static rtx
2025 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2026 rtx op0, rtx op1)
2028 rtx tem;
2030 /* Linearize the operator to the left. */
2031 if (GET_CODE (op1) == code)
2033 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2034 if (GET_CODE (op0) == code)
2036 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2037 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2040 /* "a op (b op c)" becomes "(b op c) op a". */
2041 if (! swap_commutative_operands_p (op1, op0))
2042 return simplify_gen_binary (code, mode, op1, op0);
2044 tem = op0;
2045 op0 = op1;
2046 op1 = tem;
2049 if (GET_CODE (op0) == code)
2051 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2052 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2054 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2055 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2058 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2059 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2060 if (tem != 0)
2061 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2063 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2064 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2065 if (tem != 0)
2066 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2069 return 0;
2073 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2074 and OP1. Return 0 if no simplification is possible.
2076 Don't use this for relational operations such as EQ or LT.
2077 Use simplify_relational_operation instead. */
2079 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2080 rtx op0, rtx op1)
2082 rtx trueop0, trueop1;
2083 rtx tem;
2085 /* Relational operations don't work here. We must know the mode
2086 of the operands in order to do the comparison correctly.
2087 Assuming a full word can give incorrect results.
2088 Consider comparing 128 with -128 in QImode. */
2089 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2090 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2092 /* Make sure the constant is second. */
2093 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2094 && swap_commutative_operands_p (op0, op1))
2096 tem = op0, op0 = op1, op1 = tem;
2099 trueop0 = avoid_constant_pool_reference (op0);
2100 trueop1 = avoid_constant_pool_reference (op1);
2102 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2103 if (tem)
2104 return tem;
2105 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2108 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2109 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2110 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2111 actual constants. */
2113 static rtx
2114 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2115 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2117 rtx tem, reversed, opleft, opright;
2118 HOST_WIDE_INT val;
2119 unsigned int width = GET_MODE_PRECISION (mode);
2121 /* Even if we can't compute a constant result,
2122 there are some cases worth simplifying. */
2124 switch (code)
2126 case PLUS:
2127 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2128 when x is NaN, infinite, or finite and nonzero. They aren't
2129 when x is -0 and the rounding mode is not towards -infinity,
2130 since (-0) + 0 is then 0. */
2131 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2132 return op0;
2134 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2135 transformations are safe even for IEEE. */
2136 if (GET_CODE (op0) == NEG)
2137 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2138 else if (GET_CODE (op1) == NEG)
2139 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2141 /* (~a) + 1 -> -a */
2142 if (INTEGRAL_MODE_P (mode)
2143 && GET_CODE (op0) == NOT
2144 && trueop1 == const1_rtx)
2145 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2147 /* Handle both-operands-constant cases. We can only add
2148 CONST_INTs to constants since the sum of relocatable symbols
2149 can't be handled by most assemblers. Don't add CONST_INT
2150 to CONST_INT since overflow won't be computed properly if wider
2151 than HOST_BITS_PER_WIDE_INT. */
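/* E.g. (plus (symbol_ref "foo") (const_int 4)) is folded by
   plus_constant into a single constant expression such as
   (const (plus (symbol_ref "foo") (const_int 4))).  */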
2153 if ((GET_CODE (op0) == CONST
2154 || GET_CODE (op0) == SYMBOL_REF
2155 || GET_CODE (op0) == LABEL_REF)
2156 && CONST_INT_P (op1))
2157 return plus_constant (mode, op0, INTVAL (op1));
2158 else if ((GET_CODE (op1) == CONST
2159 || GET_CODE (op1) == SYMBOL_REF
2160 || GET_CODE (op1) == LABEL_REF)
2161 && CONST_INT_P (op0))
2162 return plus_constant (mode, op1, INTVAL (op0));
2164 /* See if this is something like X * C - X or vice versa or
2165 if the multiplication is written as a shift. If so, we can
2166 distribute and make a new multiply, shift, or maybe just
2167 have X (if C is 2 in the example above). But don't make
2168 something more expensive than we had before. */
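/* For example, (plus (mult x (const_int 3)) x) folds to
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   becomes (mult x (const_int 5)), provided the result is not more
   costly than the original expression.  */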
2170 if (SCALAR_INT_MODE_P (mode))
2172 double_int coeff0, coeff1;
2173 rtx lhs = op0, rhs = op1;
2175 coeff0 = double_int_one;
2176 coeff1 = double_int_one;
2178 if (GET_CODE (lhs) == NEG)
2180 coeff0 = double_int_minus_one;
2181 lhs = XEXP (lhs, 0);
2183 else if (GET_CODE (lhs) == MULT
2184 && CONST_INT_P (XEXP (lhs, 1)))
2186 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2187 lhs = XEXP (lhs, 0);
2189 else if (GET_CODE (lhs) == ASHIFT
2190 && CONST_INT_P (XEXP (lhs, 1))
2191 && INTVAL (XEXP (lhs, 1)) >= 0
2192 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2194 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2195 lhs = XEXP (lhs, 0);
2198 if (GET_CODE (rhs) == NEG)
2200 coeff1 = double_int_minus_one;
2201 rhs = XEXP (rhs, 0);
2203 else if (GET_CODE (rhs) == MULT
2204 && CONST_INT_P (XEXP (rhs, 1)))
2206 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2207 rhs = XEXP (rhs, 0);
2209 else if (GET_CODE (rhs) == ASHIFT
2210 && CONST_INT_P (XEXP (rhs, 1))
2211 && INTVAL (XEXP (rhs, 1)) >= 0
2212 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2214 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2215 rhs = XEXP (rhs, 0);
2218 if (rtx_equal_p (lhs, rhs))
2220 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2221 rtx coeff;
2222 double_int val;
2223 bool speed = optimize_function_for_speed_p (cfun);
2225 val = coeff0 + coeff1;
2226 coeff = immed_double_int_const (val, mode);
2228 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2229 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2230 ? tem : 0;
2234 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2235 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2236 && GET_CODE (op0) == XOR
2237 && (CONST_INT_P (XEXP (op0, 1))
2238 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2239 && mode_signbit_p (mode, op1))
2240 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2241 simplify_gen_binary (XOR, mode, op1,
2242 XEXP (op0, 1)));
2244 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2245 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2246 && GET_CODE (op0) == MULT
2247 && GET_CODE (XEXP (op0, 0)) == NEG)
2249 rtx in1, in2;
2251 in1 = XEXP (XEXP (op0, 0), 0);
2252 in2 = XEXP (op0, 1);
2253 return simplify_gen_binary (MINUS, mode, op1,
2254 simplify_gen_binary (MULT, mode,
2255 in1, in2));
2258 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2259 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2260 is 1. */
2261 if (COMPARISON_P (op0)
2262 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2263 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2264 && (reversed = reversed_comparison (op0, mode)))
2265 return
2266 simplify_gen_unary (NEG, mode, reversed, mode);
2268 /* If one of the operands is a PLUS or a MINUS, see if we can
2269 simplify this by the associative law.
2270 Don't use the associative law for floating point.
2271 The inaccuracy makes it nonassociative,
2272 and subtle programs can break if operations are associated. */
2274 if (INTEGRAL_MODE_P (mode)
2275 && (plus_minus_operand_p (op0)
2276 || plus_minus_operand_p (op1))
2277 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2278 return tem;
2280 /* Reassociate floating point addition only when the user
2281 specifies associative math operations. */
2282 if (FLOAT_MODE_P (mode)
2283 && flag_associative_math)
2285 tem = simplify_associative_operation (code, mode, op0, op1);
2286 if (tem)
2287 return tem;
2289 break;
2291 case COMPARE:
2292 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2293 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2294 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2295 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2297 rtx xop00 = XEXP (op0, 0);
2298 rtx xop10 = XEXP (op1, 0);
2300 #ifdef HAVE_cc0
2301 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2302 #else
2303 if (REG_P (xop00) && REG_P (xop10)
2304 && GET_MODE (xop00) == GET_MODE (xop10)
2305 && REGNO (xop00) == REGNO (xop10)
2306 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2307 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2308 #endif
2309 return xop00;
2311 break;
2313 case MINUS:
2314 /* We can't assume x-x is 0 even with non-IEEE floating point,
2315 but since it is zero except in very strange circumstances, we
2316 will treat it as zero with -ffinite-math-only. */
2317 if (rtx_equal_p (trueop0, trueop1)
2318 && ! side_effects_p (op0)
2319 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2320 return CONST0_RTX (mode);
2322 /* Change subtraction from zero into negation. (0 - x) is the
2323 same as -x when x is NaN, infinite, or finite and nonzero.
2324 But if the mode has signed zeros, and does not round towards
2325 -infinity, then 0 - 0 is 0, not -0. */
2326 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2327 return simplify_gen_unary (NEG, mode, op1, mode);
2329 /* (-1 - a) is ~a. */
2330 if (trueop0 == constm1_rtx)
2331 return simplify_gen_unary (NOT, mode, op1, mode);
2333 /* Subtracting 0 has no effect unless the mode has signed zeros
2334 and supports rounding towards -infinity. In such a case,
2335 0 - 0 is -0. */
2336 if (!(HONOR_SIGNED_ZEROS (mode)
2337 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2338 && trueop1 == CONST0_RTX (mode))
2339 return op0;
2341 /* See if this is something like X * C - X or vice versa or
2342 if the multiplication is written as a shift. If so, we can
2343 distribute and make a new multiply, shift, or maybe just
2344 have X (if C is 2 in the example above). But don't make
2345 something more expensive than we had before. */
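/* For example, (minus (mult x (const_int 3)) x) folds to
   (mult x (const_int 2)), and (minus x (ashift x (const_int 2)))
   becomes (mult x (const_int -3)), subject to the same cost check
   as for PLUS.  */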
2347 if (SCALAR_INT_MODE_P (mode))
2349 double_int coeff0, negcoeff1;
2350 rtx lhs = op0, rhs = op1;
2352 coeff0 = double_int_one;
2353 negcoeff1 = double_int_minus_one;
2355 if (GET_CODE (lhs) == NEG)
2357 coeff0 = double_int_minus_one;
2358 lhs = XEXP (lhs, 0);
2360 else if (GET_CODE (lhs) == MULT
2361 && CONST_INT_P (XEXP (lhs, 1)))
2363 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2364 lhs = XEXP (lhs, 0);
2366 else if (GET_CODE (lhs) == ASHIFT
2367 && CONST_INT_P (XEXP (lhs, 1))
2368 && INTVAL (XEXP (lhs, 1)) >= 0
2369 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2371 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2372 lhs = XEXP (lhs, 0);
2375 if (GET_CODE (rhs) == NEG)
2377 negcoeff1 = double_int_one;
2378 rhs = XEXP (rhs, 0);
2380 else if (GET_CODE (rhs) == MULT
2381 && CONST_INT_P (XEXP (rhs, 1)))
2383 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2384 rhs = XEXP (rhs, 0);
2386 else if (GET_CODE (rhs) == ASHIFT
2387 && CONST_INT_P (XEXP (rhs, 1))
2388 && INTVAL (XEXP (rhs, 1)) >= 0
2389 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2391 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2392 negcoeff1 = -negcoeff1;
2393 rhs = XEXP (rhs, 0);
2396 if (rtx_equal_p (lhs, rhs))
2398 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2399 rtx coeff;
2400 double_int val;
2401 bool speed = optimize_function_for_speed_p (cfun);
2403 val = coeff0 + negcoeff1;
2404 coeff = immed_double_int_const (val, mode);
2406 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2407 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2408 ? tem : 0;
2412 /* (a - (-b)) -> (a + b). True even for IEEE. */
2413 if (GET_CODE (op1) == NEG)
2414 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2416 /* (-x - c) may be simplified as (-c - x). */
2417 if (GET_CODE (op0) == NEG
2418 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2420 tem = simplify_unary_operation (NEG, mode, op1, mode);
2421 if (tem)
2422 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2425 /* Don't let a relocatable value get a negative coeff. */
2426 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2427 return simplify_gen_binary (PLUS, mode,
2428 op0,
2429 neg_const_int (mode, op1));
2431 /* (x - (x & y)) -> (x & ~y) */
2432 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2434 if (rtx_equal_p (op0, XEXP (op1, 0)))
2436 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2437 GET_MODE (XEXP (op1, 1)));
2438 return simplify_gen_binary (AND, mode, op0, tem);
2440 if (rtx_equal_p (op0, XEXP (op1, 1)))
2442 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2443 GET_MODE (XEXP (op1, 0)));
2444 return simplify_gen_binary (AND, mode, op0, tem);
2448 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2449 by reversing the comparison code if valid. */
2450 if (STORE_FLAG_VALUE == 1
2451 && trueop0 == const1_rtx
2452 && COMPARISON_P (op1)
2453 && (reversed = reversed_comparison (op1, mode)))
2454 return reversed;
2456 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2457 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2458 && GET_CODE (op1) == MULT
2459 && GET_CODE (XEXP (op1, 0)) == NEG)
2461 rtx in1, in2;
2463 in1 = XEXP (XEXP (op1, 0), 0);
2464 in2 = XEXP (op1, 1);
2465 return simplify_gen_binary (PLUS, mode,
2466 simplify_gen_binary (MULT, mode,
2467 in1, in2),
2468 op0);
2471 /* Canonicalize (minus (neg A) (mult B C)) to
2472 (minus (mult (neg B) C) A). */
2473 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2474 && GET_CODE (op1) == MULT
2475 && GET_CODE (op0) == NEG)
2477 rtx in1, in2;
2479 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2480 in2 = XEXP (op1, 1);
2481 return simplify_gen_binary (MINUS, mode,
2482 simplify_gen_binary (MULT, mode,
2483 in1, in2),
2484 XEXP (op0, 0));
2487 /* If one of the operands is a PLUS or a MINUS, see if we can
2488 simplify this by the associative law. This will, for example,
2489 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2490 Don't use the associative law for floating point.
2491 The inaccuracy makes it nonassociative,
2492 and subtle programs can break if operations are associated. */
2494 if (INTEGRAL_MODE_P (mode)
2495 && (plus_minus_operand_p (op0)
2496 || plus_minus_operand_p (op1))
2497 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2498 return tem;
2499 break;
2501 case MULT:
2502 if (trueop1 == constm1_rtx)
2503 return simplify_gen_unary (NEG, mode, op0, mode);
2505 if (GET_CODE (op0) == NEG)
2507 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2508 /* If op1 is a MULT as well and simplify_unary_operation
2509 just moved the NEG to the second operand, simplify_gen_binary
2510 below could, via simplify_associative_operation, move
2511 the NEG around again and recurse endlessly. */
2512 if (temp
2513 && GET_CODE (op1) == MULT
2514 && GET_CODE (temp) == MULT
2515 && XEXP (op1, 0) == XEXP (temp, 0)
2516 && GET_CODE (XEXP (temp, 1)) == NEG
2517 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2518 temp = NULL_RTX;
2519 if (temp)
2520 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2522 if (GET_CODE (op1) == NEG)
2524 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2525 /* If op0 is a MULT as well and simplify_unary_operation
2526 just moved the NEG to the second operand, simplify_gen_binary
2527 below could, via simplify_associative_operation, move
2528 the NEG around again and recurse endlessly. */
2529 if (temp
2530 && GET_CODE (op0) == MULT
2531 && GET_CODE (temp) == MULT
2532 && XEXP (op0, 0) == XEXP (temp, 0)
2533 && GET_CODE (XEXP (temp, 1)) == NEG
2534 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2535 temp = NULL_RTX;
2536 if (temp)
2537 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2540 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2541 x is NaN, since x * 0 is then also NaN. Nor is it valid
2542 when the mode has signed zeros, since multiplying a negative
2543 number by 0 will give -0, not 0. */
2544 if (!HONOR_NANS (mode)
2545 && !HONOR_SIGNED_ZEROS (mode)
2546 && trueop1 == CONST0_RTX (mode)
2547 && ! side_effects_p (op0))
2548 return op1;
2550 /* In IEEE floating point, x*1 is not equivalent to x for
2551 signalling NaNs. */
2552 if (!HONOR_SNANS (mode)
2553 && trueop1 == CONST1_RTX (mode))
2554 return op0;
2556 /* Convert multiply by constant power of two into shift unless
2557 we are still generating RTL. This test is a kludge. */
2558 if (CONST_INT_P (trueop1)
2559 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2560 /* If the mode is larger than the host word size, and the
2561 uppermost bit is set, then this isn't a power of two due
2562 to implicit sign extension. */
2563 && (width <= HOST_BITS_PER_WIDE_INT
2564 || val != HOST_BITS_PER_WIDE_INT - 1))
2565 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2567 /* Likewise for multipliers wider than a word. */
2568 if (CONST_DOUBLE_AS_INT_P (trueop1)
2569 && GET_MODE (op0) == mode
2570 && CONST_DOUBLE_LOW (trueop1) == 0
2571 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2572 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2573 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2574 return simplify_gen_binary (ASHIFT, mode, op0,
2575 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2577 /* x*2 is x+x and x*(-1) is -x */
2578 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2579 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2580 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2581 && GET_MODE (op0) == mode)
2583 REAL_VALUE_TYPE d;
2584 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2586 if (REAL_VALUES_EQUAL (d, dconst2))
2587 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2589 if (!HONOR_SNANS (mode)
2590 && REAL_VALUES_EQUAL (d, dconstm1))
2591 return simplify_gen_unary (NEG, mode, op0, mode);
2594 /* Optimize -x * -x as x * x. */
2595 if (FLOAT_MODE_P (mode)
2596 && GET_CODE (op0) == NEG
2597 && GET_CODE (op1) == NEG
2598 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2599 && !side_effects_p (XEXP (op0, 0)))
2600 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2602 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2603 if (SCALAR_FLOAT_MODE_P (mode)
2604 && GET_CODE (op0) == ABS
2605 && GET_CODE (op1) == ABS
2606 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2607 && !side_effects_p (XEXP (op0, 0)))
2608 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2610 /* Reassociate multiplication, but for floating point MULTs
2611 only when the user specifies unsafe math optimizations. */
2612 if (! FLOAT_MODE_P (mode)
2613 || flag_unsafe_math_optimizations)
2615 tem = simplify_associative_operation (code, mode, op0, op1);
2616 if (tem)
2617 return tem;
2619 break;
2621 case IOR:
2622 if (trueop1 == CONST0_RTX (mode))
2623 return op0;
2624 if (INTEGRAL_MODE_P (mode)
2625 && trueop1 == CONSTM1_RTX (mode)
2626 && !side_effects_p (op0))
2627 return op1;
2628 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2629 return op0;
2630 /* A | (~A) -> -1 */
2631 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2632 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2633 && ! side_effects_p (op0)
2634 && SCALAR_INT_MODE_P (mode))
2635 return constm1_rtx;
2637 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2638 if (CONST_INT_P (op1)
2639 && HWI_COMPUTABLE_MODE_P (mode)
2640 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2641 && !side_effects_p (op0))
2642 return op1;
2644 /* Canonicalize (X & C1) | C2. */
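/* E.g. (ior (and x (const_int 0xf0)) (const_int 0x3c)) becomes
   (ior (and x (const_int 0xc0)) (const_int 0x3c)): bits of C1 that
   are already set in C2 cannot affect the result.  */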
2645 if (GET_CODE (op0) == AND
2646 && CONST_INT_P (trueop1)
2647 && CONST_INT_P (XEXP (op0, 1)))
2649 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2650 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2651 HOST_WIDE_INT c2 = INTVAL (trueop1);
2653 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2654 if ((c1 & c2) == c1
2655 && !side_effects_p (XEXP (op0, 0)))
2656 return trueop1;
2658 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2659 if (((c1|c2) & mask) == mask)
2660 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2662 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2663 if (((c1 & ~c2) & mask) != (c1 & mask))
2665 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2666 gen_int_mode (c1 & ~c2, mode));
2667 return simplify_gen_binary (IOR, mode, tem, op1);
2671 /* Convert (A & B) | A to A. */
2672 if (GET_CODE (op0) == AND
2673 && (rtx_equal_p (XEXP (op0, 0), op1)
2674 || rtx_equal_p (XEXP (op0, 1), op1))
2675 && ! side_effects_p (XEXP (op0, 0))
2676 && ! side_effects_p (XEXP (op0, 1)))
2677 return op1;
2679 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2680 mode size to (rotate A CX). */
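/* E.g. in SImode, (ior (ashift x (const_int 8))
   (lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)).  */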
2682 if (GET_CODE (op1) == ASHIFT
2683 || GET_CODE (op1) == SUBREG)
2685 opleft = op1;
2686 opright = op0;
2688 else
2690 opright = op1;
2691 opleft = op0;
2694 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2695 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2696 && CONST_INT_P (XEXP (opleft, 1))
2697 && CONST_INT_P (XEXP (opright, 1))
2698 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2699 == GET_MODE_PRECISION (mode)))
2700 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2702 /* Same, but for ashift that has been "simplified" to a wider mode
2703 by simplify_shift_const. */
2705 if (GET_CODE (opleft) == SUBREG
2706 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2707 && GET_CODE (opright) == LSHIFTRT
2708 && GET_CODE (XEXP (opright, 0)) == SUBREG
2709 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2710 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2711 && (GET_MODE_SIZE (GET_MODE (opleft))
2712 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2713 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2714 SUBREG_REG (XEXP (opright, 0)))
2715 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2716 && CONST_INT_P (XEXP (opright, 1))
2717 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2718 == GET_MODE_PRECISION (mode)))
2719 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2720 XEXP (SUBREG_REG (opleft), 1));
2722 /* If we have (ior (and X C1) C2), simplify this by making
2723 C1 as small as possible if C1 actually changes. */
2724 if (CONST_INT_P (op1)
2725 && (HWI_COMPUTABLE_MODE_P (mode)
2726 || INTVAL (op1) > 0)
2727 && GET_CODE (op0) == AND
2728 && CONST_INT_P (XEXP (op0, 1))
2729 && CONST_INT_P (op1)
2730 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2731 return simplify_gen_binary (IOR, mode,
2732 simplify_gen_binary
2733 (AND, mode, XEXP (op0, 0),
2734 GEN_INT (UINTVAL (XEXP (op0, 1))
2735 & ~UINTVAL (op1))),
2736 op1);
2738 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2739 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2740 the PLUS does not affect any of the bits in OP1, we can do
2741 the IOR as a PLUS and we can associate. This is valid if OP1
2742 can be safely shifted left C bits. */
2743 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2744 && GET_CODE (XEXP (op0, 0)) == PLUS
2745 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2746 && CONST_INT_P (XEXP (op0, 1))
2747 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2749 int count = INTVAL (XEXP (op0, 1));
2750 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2752 if (mask >> count == INTVAL (trueop1)
2753 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2754 return simplify_gen_binary (ASHIFTRT, mode,
2755 plus_constant (mode, XEXP (op0, 0),
2756 mask),
2757 XEXP (op0, 1));
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
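/* E.g. in QImode, (xor x (const_int -128)) becomes
   (plus x (const_int -128)); adding the sign bit and XORing it
   agree modulo 2**precision.  */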
2776 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2781 && GET_CODE (op0) == PLUS
2782 && (CONST_INT_P (XEXP (op0, 1))
2783 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2784 && mode_signbit_p (mode, XEXP (op0, 1)))
2785 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2786 simplify_gen_binary (XOR, mode, op1,
2787 XEXP (op0, 1)));
2789 /* If we are XORing two things that have no bits in common,
2790 convert them into an IOR. This helps to detect rotation encoded
2791 using those methods and possibly other simplifications. */
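/* E.g. (xor (and x (const_int 15)) (and y (const_int 240))) can be
   rewritten as an IOR, since the operands have no nonzero bits in
   common.  */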
2793 if (HWI_COMPUTABLE_MODE_P (mode)
2794 && (nonzero_bits (op0, mode)
2795 & nonzero_bits (op1, mode)) == 0)
2796 return (simplify_gen_binary (IOR, mode, op0, op1));
2798 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2799 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2800 (NOT y). */
2802 int num_negated = 0;
2804 if (GET_CODE (op0) == NOT)
2805 num_negated++, op0 = XEXP (op0, 0);
2806 if (GET_CODE (op1) == NOT)
2807 num_negated++, op1 = XEXP (op1, 0);
2809 if (num_negated == 2)
2810 return simplify_gen_binary (XOR, mode, op0, op1);
2811 else if (num_negated == 1)
2812 return simplify_gen_unary (NOT, mode,
2813 simplify_gen_binary (XOR, mode, op0, op1),
2814 mode);
2817 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2818 correspond to a machine insn or result in further simplifications
2819 if B is a constant. */
2821 if (GET_CODE (op0) == AND
2822 && rtx_equal_p (XEXP (op0, 1), op1)
2823 && ! side_effects_p (op1))
2824 return simplify_gen_binary (AND, mode,
2825 simplify_gen_unary (NOT, mode,
2826 XEXP (op0, 0), mode),
2827 op1);
2829 else if (GET_CODE (op0) == AND
2830 && rtx_equal_p (XEXP (op0, 0), op1)
2831 && ! side_effects_p (op1))
2832 return simplify_gen_binary (AND, mode,
2833 simplify_gen_unary (NOT, mode,
2834 XEXP (op0, 1), mode),
2835 op1);
2837 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2838 we can transform like this:
2839 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2840 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2841 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2842 Attempt a few simplifications when B and C are both constants. */
2843 if (GET_CODE (op0) == AND
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1)))
2847 rtx a = XEXP (op0, 0);
2848 rtx b = XEXP (op0, 1);
2849 rtx c = op1;
2850 HOST_WIDE_INT bval = INTVAL (b);
2851 HOST_WIDE_INT cval = INTVAL (c);
2853 rtx na_c
2854 = simplify_binary_operation (AND, mode,
2855 simplify_gen_unary (NOT, mode, a, mode), c);
2857 if ((~cval & bval) == 0)
2859 /* Try to simplify ~A&C | ~B&C. */
2860 if (na_c != NULL_RTX)
2861 return simplify_gen_binary (IOR, mode, na_c,
2862 GEN_INT (~bval & cval));
2864 else
2866 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2867 if (na_c == const0_rtx)
2869 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2870 GEN_INT (~cval & bval));
2871 return simplify_gen_binary (IOR, mode, a_nc_b,
2872 GEN_INT (~bval & cval));
2877 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2878 comparison if STORE_FLAG_VALUE is 1. */
2879 if (STORE_FLAG_VALUE == 1
2880 && trueop1 == const1_rtx
2881 && COMPARISON_P (op0)
2882 && (reversed = reversed_comparison (op0, mode)))
2883 return reversed;
2885 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2886 is (lt foo (const_int 0)), so we can perform the above
2887 simplification if STORE_FLAG_VALUE is 1. */
2889 if (STORE_FLAG_VALUE == 1
2890 && trueop1 == const1_rtx
2891 && GET_CODE (op0) == LSHIFTRT
2892 && CONST_INT_P (XEXP (op0, 1))
2893 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2894 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2896 /* (xor (comparison foo bar) (const_int sign-bit))
2897 when STORE_FLAG_VALUE is the sign bit. */
2898 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2899 && trueop1 == const_true_rtx
2900 && COMPARISON_P (op0)
2901 && (reversed = reversed_comparison (op0, mode)))
2902 return reversed;
2904 tem = simplify_associative_operation (code, mode, op0, op1);
2905 if (tem)
2906 return tem;
2907 break;
2909 case AND:
2910 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2911 return trueop1;
2912 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2913 return op0;
2914 if (HWI_COMPUTABLE_MODE_P (mode))
2916 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2917 HOST_WIDE_INT nzop1;
2918 if (CONST_INT_P (trueop1))
2920 HOST_WIDE_INT val1 = INTVAL (trueop1);
2921 /* If we are turning off bits already known off in OP0, we need
2922 not do an AND. */
2923 if ((nzop0 & ~val1) == 0)
2924 return op0;
2926 nzop1 = nonzero_bits (trueop1, mode);
2927 /* If we are clearing all the nonzero bits, the result is zero. */
2928 if ((nzop1 & nzop0) == 0
2929 && !side_effects_p (op0) && !side_effects_p (op1))
2930 return CONST0_RTX (mode);
2932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2933 && GET_MODE_CLASS (mode) != MODE_CC)
2934 return op0;
2935 /* A & (~A) -> 0 */
2936 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2937 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2938 && ! side_effects_p (op0)
2939 && GET_MODE_CLASS (mode) != MODE_CC)
2940 return CONST0_RTX (mode);
2942 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2943 there are no nonzero bits of C outside of X's mode. */
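/* E.g. (and:SI (sign_extend:SI x) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI x (const_int 0x7f))) when x has QImode.  */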
2944 if ((GET_CODE (op0) == SIGN_EXTEND
2945 || GET_CODE (op0) == ZERO_EXTEND)
2946 && CONST_INT_P (trueop1)
2947 && HWI_COMPUTABLE_MODE_P (mode)
2948 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2949 & UINTVAL (trueop1)) == 0)
2951 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2952 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2953 gen_int_mode (INTVAL (trueop1),
2954 imode));
2955 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2958 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2959 we might be able to further simplify the AND with X and potentially
2960 remove the truncation altogether. */
2961 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2963 rtx x = XEXP (op0, 0);
2964 enum machine_mode xmode = GET_MODE (x);
2965 tem = simplify_gen_binary (AND, xmode, x,
2966 gen_int_mode (INTVAL (trueop1), xmode));
2967 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2970 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2971 if (GET_CODE (op0) == IOR
2972 && CONST_INT_P (trueop1)
2973 && CONST_INT_P (XEXP (op0, 1)))
2975 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2976 return simplify_gen_binary (IOR, mode,
2977 simplify_gen_binary (AND, mode,
2978 XEXP (op0, 0), op1),
2979 gen_int_mode (tmp, mode));
2982 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2983 insn (and may simplify more). */
2984 if (GET_CODE (op0) == XOR
2985 && rtx_equal_p (XEXP (op0, 0), op1)
2986 && ! side_effects_p (op1))
2987 return simplify_gen_binary (AND, mode,
2988 simplify_gen_unary (NOT, mode,
2989 XEXP (op0, 1), mode),
2990 op1);
2992 if (GET_CODE (op0) == XOR
2993 && rtx_equal_p (XEXP (op0, 1), op1)
2994 && ! side_effects_p (op1))
2995 return simplify_gen_binary (AND, mode,
2996 simplify_gen_unary (NOT, mode,
2997 XEXP (op0, 0), mode),
2998 op1);
3000 /* Similarly for (~(A ^ B)) & A. */
3001 if (GET_CODE (op0) == NOT
3002 && GET_CODE (XEXP (op0, 0)) == XOR
3003 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3004 && ! side_effects_p (op1))
3005 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3007 if (GET_CODE (op0) == NOT
3008 && GET_CODE (XEXP (op0, 0)) == XOR
3009 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3010 && ! side_effects_p (op1))
3011 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3013 /* Convert (A | B) & A to A. */
3014 if (GET_CODE (op0) == IOR
3015 && (rtx_equal_p (XEXP (op0, 0), op1)
3016 || rtx_equal_p (XEXP (op0, 1), op1))
3017 && ! side_effects_p (XEXP (op0, 0))
3018 && ! side_effects_p (XEXP (op0, 1)))
3019 return op1;
3021 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3022 ((A & N) + B) & M -> (A + B) & M
3023 Similarly if (N & M) == 0,
3024 ((A | N) + B) & M -> (A + B) & M
3025 and for - instead of + and/or ^ instead of |.
3026 Also, if (N & M) == 0, then
3027 (A +- N) & M -> A & M. */
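/* E.g. with M == 0xff, (and (plus (ior A (const_int 0x100)) B)
   (const_int 0xff)) folds to (and (plus A B) (const_int 0xff)), and
   (and (plus A (const_int 0x100)) (const_int 0xff)) folds to
   (and A (const_int 0xff)).  */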
3028 if (CONST_INT_P (trueop1)
3029 && HWI_COMPUTABLE_MODE_P (mode)
3030 && ~UINTVAL (trueop1)
3031 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3032 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3034 rtx pmop[2];
3035 int which;
3037 pmop[0] = XEXP (op0, 0);
3038 pmop[1] = XEXP (op0, 1);
3040 if (CONST_INT_P (pmop[1])
3041 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3042 return simplify_gen_binary (AND, mode, pmop[0], op1);
3044 for (which = 0; which < 2; which++)
3046 tem = pmop[which];
3047 switch (GET_CODE (tem))
3049 case AND:
3050 if (CONST_INT_P (XEXP (tem, 1))
3051 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3052 == UINTVAL (trueop1))
3053 pmop[which] = XEXP (tem, 0);
3054 break;
3055 case IOR:
3056 case XOR:
3057 if (CONST_INT_P (XEXP (tem, 1))
3058 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3059 pmop[which] = XEXP (tem, 0);
3060 break;
3061 default:
3062 break;
3066 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3068 tem = simplify_gen_binary (GET_CODE (op0), mode,
3069 pmop[0], pmop[1]);
3070 return simplify_gen_binary (code, mode, tem, op1);
3074 /* (and X (ior (not X) Y)) -> (and X Y) */
3075 if (GET_CODE (op1) == IOR
3076 && GET_CODE (XEXP (op1, 0)) == NOT
3077 && op0 == XEXP (XEXP (op1, 0), 0))
3078 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3080 /* (and (ior (not X) Y) X) -> (and X Y) */
3081 if (GET_CODE (op0) == IOR
3082 && GET_CODE (XEXP (op0, 0)) == NOT
3083 && op1 == XEXP (XEXP (op0, 0), 0))
3084 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3086 tem = simplify_associative_operation (code, mode, op0, op1);
3087 if (tem)
3088 return tem;
3089 break;
3091 case UDIV:
3092 /* 0/x is 0 (or x&0 if x has side-effects). */
3093 if (trueop0 == CONST0_RTX (mode))
3095 if (side_effects_p (op1))
3096 return simplify_gen_binary (AND, mode, op1, trueop0);
3097 return trueop0;
3099 /* x/1 is x. */
3100 if (trueop1 == CONST1_RTX (mode))
3101 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3102 /* Convert divide by power of two into shift. */
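/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */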
3103 if (CONST_INT_P (trueop1)
3104 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3105 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3106 break;
3108 case DIV:
3109 /* Handle floating point and integers separately. */
3110 if (SCALAR_FLOAT_MODE_P (mode))
3112 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3113 safe for modes with NaNs, since 0.0 / 0.0 will then be
3114 NaN rather than 0.0. Nor is it safe for modes with signed
3115 zeros, since dividing 0 by a negative number gives -0.0 */
3116 if (trueop0 == CONST0_RTX (mode)
3117 && !HONOR_NANS (mode)
3118 && !HONOR_SIGNED_ZEROS (mode)
3119 && ! side_effects_p (op1))
3120 return op0;
3121 /* x/1.0 is x. */
3122 if (trueop1 == CONST1_RTX (mode)
3123 && !HONOR_SNANS (mode))
3124 return op0;
3126 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3127 && trueop1 != CONST0_RTX (mode))
3129 REAL_VALUE_TYPE d;
3130 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3132 /* x/-1.0 is -x. */
3133 if (REAL_VALUES_EQUAL (d, dconstm1)
3134 && !HONOR_SNANS (mode))
3135 return simplify_gen_unary (NEG, mode, op0, mode);
3137 /* Change FP division by a constant into multiplication.
3138 Only do this with -freciprocal-math. */
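/* E.g. with -freciprocal-math, x / 4.0 is rewritten as x * 0.25.  */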
3139 if (flag_reciprocal_math
3140 && !REAL_VALUES_EQUAL (d, dconst0))
3142 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3143 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3144 return simplify_gen_binary (MULT, mode, op0, tem);
3148 else if (SCALAR_INT_MODE_P (mode))
3150 /* 0/x is 0 (or x&0 if x has side-effects). */
3151 if (trueop0 == CONST0_RTX (mode)
3152 && !cfun->can_throw_non_call_exceptions)
3154 if (side_effects_p (op1))
3155 return simplify_gen_binary (AND, mode, op1, trueop0);
3156 return trueop0;
3158 /* x/1 is x. */
3159 if (trueop1 == CONST1_RTX (mode))
3160 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
3161 /* x/-1 is -x. */
3162 if (trueop1 == constm1_rtx)
3164 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3165 return simplify_gen_unary (NEG, mode, x, mode);
3168 break;
3170 case UMOD:
3171 /* 0%x is 0 (or x&0 if x has side-effects). */
3172 if (trueop0 == CONST0_RTX (mode))
3174 if (side_effects_p (op1))
3175 return simplify_gen_binary (AND, mode, op1, trueop0);
3176 return trueop0;
3178 /* x%1 is 0 (or x&0 if x has side-effects). */
3179 if (trueop1 == CONST1_RTX (mode))
3181 if (side_effects_p (op0))
3182 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3183 return CONST0_RTX (mode);
3185 /* Implement modulus by power of two as AND. */
3186 if (CONST_INT_P (trueop1)
3187 && exact_log2 (UINTVAL (trueop1)) > 0)
3188 return simplify_gen_binary (AND, mode, op0,
3189 GEN_INT (INTVAL (op1) - 1));
3190 break;
3192 case MOD:
3193 /* 0%x is 0 (or x&0 if x has side-effects). */
3194 if (trueop0 == CONST0_RTX (mode))
3196 if (side_effects_p (op1))
3197 return simplify_gen_binary (AND, mode, op1, trueop0);
3198 return trueop0;
3200 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3201 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3203 if (side_effects_p (op0))
3204 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3205 return CONST0_RTX (mode);
3207 break;
3209 case ROTATERT:
3210 case ROTATE:
3211 case ASHIFTRT:
3212 if (trueop1 == CONST0_RTX (mode))
3213 return op0;
3214 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3215 return op0;
3216 /* Rotating ~0 always results in ~0. */
3217 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3218 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3219 && ! side_effects_p (op1))
3220 return op0;
3221 canonicalize_shift:
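/* When the target truncates shift counts, reduce the count modulo the
   mode bitsize here, so e.g. an SImode shift by 37 is canonicalized
   to a shift by 5.  */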
3222 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3224 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3225 if (val != INTVAL (op1))
3226 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3228 break;
3230 case ASHIFT:
3231 case SS_ASHIFT:
3232 case US_ASHIFT:
3233 if (trueop1 == CONST0_RTX (mode))
3234 return op0;
3235 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3236 return op0;
3237 goto canonicalize_shift;
3239 case LSHIFTRT:
3240 if (trueop1 == CONST0_RTX (mode))
3241 return op0;
3242 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3243 return op0;
3244 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3245 if (GET_CODE (op0) == CLZ
3246 && CONST_INT_P (trueop1)
3247 && STORE_FLAG_VALUE == 1
3248 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3250 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3251 unsigned HOST_WIDE_INT zero_val = 0;
3253 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3254 && zero_val == GET_MODE_PRECISION (imode)
3255 && INTVAL (trueop1) == exact_log2 (zero_val))
3256 return simplify_gen_relational (EQ, mode, imode,
3257 XEXP (op0, 0), const0_rtx);
3259 goto canonicalize_shift;
3261 case SMIN:
3262 if (width <= HOST_BITS_PER_WIDE_INT
3263 && mode_signbit_p (mode, trueop1)
3264 && ! side_effects_p (op0))
3265 return op1;
3266 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3267 return op0;
3268 tem = simplify_associative_operation (code, mode, op0, op1);
3269 if (tem)
3270 return tem;
3271 break;
3273 case SMAX:
3274 if (width <= HOST_BITS_PER_WIDE_INT
3275 && CONST_INT_P (trueop1)
3276 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3277 && ! side_effects_p (op0))
3278 return op1;
3279 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3280 return op0;
3281 tem = simplify_associative_operation (code, mode, op0, op1);
3282 if (tem)
3283 return tem;
3284 break;
3286 case UMIN:
3287 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3288 return op1;
3289 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3290 return op0;
3291 tem = simplify_associative_operation (code, mode, op0, op1);
3292 if (tem)
3293 return tem;
3294 break;
3296 case UMAX:
3297 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3298 return op1;
3299 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3300 return op0;
3301 tem = simplify_associative_operation (code, mode, op0, op1);
3302 if (tem)
3303 return tem;
3304 break;
3306 case SS_PLUS:
3307 case US_PLUS:
3308 case SS_MINUS:
3309 case US_MINUS:
3310 case SS_MULT:
3311 case US_MULT:
3312 case SS_DIV:
3313 case US_DIV:
3314 /* ??? There are simplifications that can be done. */
3315 return 0;
3317 case VEC_SELECT:
3318 if (!VECTOR_MODE_P (mode))
3320 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3321 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3322 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3323 gcc_assert (XVECLEN (trueop1, 0) == 1);
3324 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3326 if (GET_CODE (trueop0) == CONST_VECTOR)
3327 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3328 (trueop1, 0, 0)));
3330 /* Extract a scalar element from a nested VEC_SELECT expression
3331 (with optional nested VEC_CONCAT expression). Some targets
3332 (i386) extract scalar element from a vector using chain of
3333 nested VEC_SELECT expressions. When input operand is a memory
3334 operand, this operation can be simplified to a simple scalar
3335 load from a suitably offset memory address. */
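/* E.g. selecting element 0 of (vec_select:V4SF X (parallel [2 3 0 1]))
   is rewritten as a direct selection of element 2 of X.  */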
3336 if (GET_CODE (trueop0) == VEC_SELECT)
3338 rtx op0 = XEXP (trueop0, 0);
3339 rtx op1 = XEXP (trueop0, 1);
3341 enum machine_mode opmode = GET_MODE (op0);
3342 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3343 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3345 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3346 int elem;
3348 rtvec vec;
3349 rtx tmp_op, tmp;
3351 gcc_assert (GET_CODE (op1) == PARALLEL);
3352 gcc_assert (i < n_elts);
3355 /* Select the element pointed to by the nested selector. */
3355 elem = INTVAL (XVECEXP (op1, 0, i));
3357 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3358 if (GET_CODE (op0) == VEC_CONCAT)
3360 rtx op00 = XEXP (op0, 0);
3361 rtx op01 = XEXP (op0, 1);
3363 enum machine_mode mode00, mode01;
3364 int n_elts00, n_elts01;
3366 mode00 = GET_MODE (op00);
3367 mode01 = GET_MODE (op01);
3369 /* Find out number of elements of each operand. */
3370 if (VECTOR_MODE_P (mode00))
3372 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3373 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3375 else
3376 n_elts00 = 1;
3378 if (VECTOR_MODE_P (mode01))
3380 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3381 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3383 else
3384 n_elts01 = 1;
3386 gcc_assert (n_elts == n_elts00 + n_elts01);
3388 /* Select correct operand of VEC_CONCAT
3389 and adjust selector. */
3390 if (elem < n_elts01)
3391 tmp_op = op00;
3392 else
3394 tmp_op = op01;
3395 elem -= n_elts00;
3398 else
3399 tmp_op = op0;
3401 vec = rtvec_alloc (1);
3402 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3404 tmp = gen_rtx_fmt_ee (code, mode,
3405 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3406 return tmp;
3408 if (GET_CODE (trueop0) == VEC_DUPLICATE
3409 && GET_MODE (XEXP (trueop0, 0)) == mode)
3410 return XEXP (trueop0, 0);
3412 else
3414 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3415 gcc_assert (GET_MODE_INNER (mode)
3416 == GET_MODE_INNER (GET_MODE (trueop0)));
3417 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3419 if (GET_CODE (trueop0) == CONST_VECTOR)
3421 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3422 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3423 rtvec v = rtvec_alloc (n_elts);
3424 unsigned int i;
3426 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3427 for (i = 0; i < n_elts; i++)
3429 rtx x = XVECEXP (trueop1, 0, i);
3431 gcc_assert (CONST_INT_P (x));
3432 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3433 INTVAL (x));
3436 return gen_rtx_CONST_VECTOR (mode, v);
3439 /* Recognize the identity. */
3440 if (GET_MODE (trueop0) == mode)
3442 bool maybe_ident = true;
3443 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3445 rtx j = XVECEXP (trueop1, 0, i);
3446 if (!CONST_INT_P (j) || INTVAL (j) != i)
3448 maybe_ident = false;
3449 break;
3452 if (maybe_ident)
3453 return trueop0;
3456 /* If we build {a,b} then permute it, build the result directly. */
3457 if (XVECLEN (trueop1, 0) == 2
3458 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3459 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3460 && GET_CODE (trueop0) == VEC_CONCAT
3461 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3462 && GET_MODE (XEXP (trueop0, 0)) == mode
3463 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3464 && GET_MODE (XEXP (trueop0, 1)) == mode)
3466 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3467 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3468 rtx subop0, subop1;
3470 gcc_assert (i0 < 4 && i1 < 4);
3471 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3472 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3474 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3477 if (XVECLEN (trueop1, 0) == 2
3478 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3479 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3480 && GET_CODE (trueop0) == VEC_CONCAT
3481 && GET_MODE (trueop0) == mode)
3483 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3484 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3485 rtx subop0, subop1;
3487 gcc_assert (i0 < 2 && i1 < 2);
3488 subop0 = XEXP (trueop0, i0);
3489 subop1 = XEXP (trueop0, i1);
3491 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3495 if (XVECLEN (trueop1, 0) == 1
3496 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3497 && GET_CODE (trueop0) == VEC_CONCAT)
3499 rtx vec = trueop0;
3500 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3502 /* Try to find the element in the VEC_CONCAT. */
3503 while (GET_MODE (vec) != mode
3504 && GET_CODE (vec) == VEC_CONCAT)
3506 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3507 if (offset < vec_size)
3508 vec = XEXP (vec, 0);
3509 else
3511 offset -= vec_size;
3512 vec = XEXP (vec, 1);
3514 vec = avoid_constant_pool_reference (vec);
3517 if (GET_MODE (vec) == mode)
3518 return vec;
3521 return 0;
3522 case VEC_CONCAT:
3524 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3525 ? GET_MODE (trueop0)
3526 : GET_MODE_INNER (mode));
3527 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3528 ? GET_MODE (trueop1)
3529 : GET_MODE_INNER (mode));
3531 gcc_assert (VECTOR_MODE_P (mode));
3532 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3533 == GET_MODE_SIZE (mode));
3535 if (VECTOR_MODE_P (op0_mode))
3536 gcc_assert (GET_MODE_INNER (mode)
3537 == GET_MODE_INNER (op0_mode));
3538 else
3539 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3541 if (VECTOR_MODE_P (op1_mode))
3542 gcc_assert (GET_MODE_INNER (mode)
3543 == GET_MODE_INNER (op1_mode));
3544 else
3545 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3547 if ((GET_CODE (trueop0) == CONST_VECTOR
3548 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3549 && (GET_CODE (trueop1) == CONST_VECTOR
3550 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3552 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3553 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3554 rtvec v = rtvec_alloc (n_elts);
3555 unsigned int i;
3556 unsigned in_n_elts = 1;
3558 if (VECTOR_MODE_P (op0_mode))
3559 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3560 for (i = 0; i < n_elts; i++)
3562 if (i < in_n_elts)
3564 if (!VECTOR_MODE_P (op0_mode))
3565 RTVEC_ELT (v, i) = trueop0;
3566 else
3567 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3569 else
3571 if (!VECTOR_MODE_P (op1_mode))
3572 RTVEC_ELT (v, i) = trueop1;
3573 else
3574 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3575 i - in_n_elts);
3579 return gen_rtx_CONST_VECTOR (mode, v);
3582 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3583 if (GET_CODE (trueop0) == VEC_SELECT
3584 && GET_CODE (trueop1) == VEC_SELECT
3585 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3587 rtx par0 = XEXP (trueop0, 1);
3588 rtx par1 = XEXP (trueop1, 1);
3589 int len0 = XVECLEN (par0, 0);
3590 int len1 = XVECLEN (par1, 0);
3591 rtvec vec = rtvec_alloc (len0 + len1);
3592 for (int i = 0; i < len0; i++)
3593 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3594 for (int i = 0; i < len1; i++)
3595 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3596 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3597 gen_rtx_PARALLEL (VOIDmode, vec));
3600 return 0;
3602 default:
3603 gcc_unreachable ();
3606 return 0;
3610 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3611 rtx op0, rtx op1)
3613 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3614 HOST_WIDE_INT val;
3615 unsigned int width = GET_MODE_PRECISION (mode);
3617 if (VECTOR_MODE_P (mode)
3618 && code != VEC_CONCAT
3619 && GET_CODE (op0) == CONST_VECTOR
3620 && GET_CODE (op1) == CONST_VECTOR)
3622 unsigned n_elts = GET_MODE_NUNITS (mode);
3623 enum machine_mode op0mode = GET_MODE (op0);
3624 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3625 enum machine_mode op1mode = GET_MODE (op1);
3626 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3627 rtvec v = rtvec_alloc (n_elts);
3628 unsigned int i;
3630 gcc_assert (op0_n_elts == n_elts);
3631 gcc_assert (op1_n_elts == n_elts);
3632 for (i = 0; i < n_elts; i++)
3634 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3635 CONST_VECTOR_ELT (op0, i),
3636 CONST_VECTOR_ELT (op1, i));
3637 if (!x)
3638 return 0;
3639 RTVEC_ELT (v, i) = x;
3642 return gen_rtx_CONST_VECTOR (mode, v);
3645 if (VECTOR_MODE_P (mode)
3646 && code == VEC_CONCAT
3647 && (CONST_INT_P (op0)
3648 || GET_CODE (op0) == CONST_FIXED
3649 || CONST_DOUBLE_P (op0))
3650 && (CONST_INT_P (op1)
3651 || CONST_DOUBLE_P (op1)
3652 || GET_CODE (op1) == CONST_FIXED))
3654 unsigned n_elts = GET_MODE_NUNITS (mode);
3655 rtvec v = rtvec_alloc (n_elts);
3657 gcc_assert (n_elts >= 2);
3658 if (n_elts == 2)
3660 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3661 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3663 RTVEC_ELT (v, 0) = op0;
3664 RTVEC_ELT (v, 1) = op1;
3666 else
3668 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3669 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3670 unsigned i;
3672 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3673 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3674 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3676 for (i = 0; i < op0_n_elts; ++i)
3677 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3678 for (i = 0; i < op1_n_elts; ++i)
3679 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3682 return gen_rtx_CONST_VECTOR (mode, v);
3685 if (SCALAR_FLOAT_MODE_P (mode)
3686 && CONST_DOUBLE_AS_FLOAT_P (op0)
3687 && CONST_DOUBLE_AS_FLOAT_P (op1)
3688 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3690 if (code == AND
3691 || code == IOR
3692 || code == XOR)
3694 long tmp0[4];
3695 long tmp1[4];
3696 REAL_VALUE_TYPE r;
3697 int i;
3699 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3700 GET_MODE (op0));
3701 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3702 GET_MODE (op1));
3703 for (i = 0; i < 4; i++)
3705 switch (code)
3707 case AND:
3708 tmp0[i] &= tmp1[i];
3709 break;
3710 case IOR:
3711 tmp0[i] |= tmp1[i];
3712 break;
3713 case XOR:
3714 tmp0[i] ^= tmp1[i];
3715 break;
3716 default:
3717 gcc_unreachable ();
3720 real_from_target (&r, tmp0, mode);
3721 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3723 else
3725 REAL_VALUE_TYPE f0, f1, value, result;
3726 bool inexact;
3728 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3729 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3730 real_convert (&f0, mode, &f0);
3731 real_convert (&f1, mode, &f1);
3733 if (HONOR_SNANS (mode)
3734 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3735 return 0;
3737 if (code == DIV
3738 && REAL_VALUES_EQUAL (f1, dconst0)
3739 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3740 return 0;
3742 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3743 && flag_trapping_math
3744 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3746 int s0 = REAL_VALUE_NEGATIVE (f0);
3747 int s1 = REAL_VALUE_NEGATIVE (f1);
3749 switch (code)
3751 case PLUS:
3752 /* Inf + -Inf = NaN plus exception. */
3753 if (s0 != s1)
3754 return 0;
3755 break;
3756 case MINUS:
3757 /* Inf - Inf = NaN plus exception. */
3758 if (s0 == s1)
3759 return 0;
3760 break;
3761 case DIV:
3762 /* Inf / Inf = NaN plus exception. */
3763 return 0;
3764 default:
3765 break;
3769 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3770 && flag_trapping_math
3771 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3772 || (REAL_VALUE_ISINF (f1)
3773 && REAL_VALUES_EQUAL (f0, dconst0))))
3774 /* Inf * 0 = NaN plus exception. */
3775 return 0;
3777 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3778 &f0, &f1);
3779 real_convert (&result, mode, &value);
3781 /* Don't constant fold this floating point operation if
3782 the result has overflowed and flag_trapping_math is set. */
3784 if (flag_trapping_math
3785 && MODE_HAS_INFINITIES (mode)
3786 && REAL_VALUE_ISINF (result)
3787 && !REAL_VALUE_ISINF (f0)
3788 && !REAL_VALUE_ISINF (f1))
3789 /* Overflow plus exception. */
3790 return 0;
3792 /* Don't constant fold this floating point operation if the
3793 result may depend upon the run-time rounding mode and
3794 flag_rounding_math is set, or if GCC's software emulation
3795 is unable to accurately represent the result. */
3797 if ((flag_rounding_math
3798 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3799 && (inexact || !real_identical (&result, &value)))
3800 return NULL_RTX;
3802 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3806 /* We can fold some multi-word operations. */
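/* For example, with a 32-bit HOST_WIDE_INT, DImode constants are handled
   here as (low, high) double_int pairs.  */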
3807 if (GET_MODE_CLASS (mode) == MODE_INT
3808 && width == HOST_BITS_PER_DOUBLE_INT
3809 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3810 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3812 double_int o0, o1, res, tmp;
3813 bool overflow;
3815 o0 = rtx_to_double_int (op0);
3816 o1 = rtx_to_double_int (op1);
3818 switch (code)
3820 case MINUS:
3821 /* A - B == A + (-B). */
3822 o1 = -o1;
3824 /* Fall through. */
3826 case PLUS:
3827 res = o0 + o1;
3828 break;
3830 case MULT:
3831 res = o0 * o1;
3832 break;
3834 case DIV:
3835 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3836 &tmp, &overflow);
3837 if (overflow)
3838 return 0;
3839 break;
3841 case MOD:
3842 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3843 &res, &overflow);
3844 if (overflow)
3845 return 0;
3846 break;
3848 case UDIV:
3849 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3850 &tmp, &overflow);
3851 if (overflow)
3852 return 0;
3853 break;
3855 case UMOD:
3856 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3857 &res, &overflow);
3858 if (overflow)
3859 return 0;
3860 break;
3862 case AND:
3863 res = o0 & o1;
3864 break;
3866 case IOR:
3867 res = o0 | o1;
3868 break;
3870 case XOR:
3871 res = o0 ^ o1;
3872 break;
3874 case SMIN:
3875 res = o0.smin (o1);
3876 break;
3878 case SMAX:
3879 res = o0.smax (o1);
3880 break;
3882 case UMIN:
3883 res = o0.umin (o1);
3884 break;
3886 case UMAX:
3887 res = o0.umax (o1);
3888 break;
3890 case LSHIFTRT: case ASHIFTRT:
3891 case ASHIFT:
3892 case ROTATE: case ROTATERT:
3894 unsigned HOST_WIDE_INT cnt;
3896 if (SHIFT_COUNT_TRUNCATED)
3898 o1.high = 0;
3899 o1.low &= GET_MODE_PRECISION (mode) - 1;
3902 if (!o1.fits_uhwi ()
3903 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3904 return 0;
3906 cnt = o1.to_uhwi ();
3907 unsigned short prec = GET_MODE_PRECISION (mode);
3909 if (code == LSHIFTRT || code == ASHIFTRT)
3910 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3911 else if (code == ASHIFT)
3912 res = o0.alshift (cnt, prec);
3913 else if (code == ROTATE)
3914 res = o0.lrotate (cnt, prec);
3915 else /* code == ROTATERT */
3916 res = o0.rrotate (cnt, prec);
3918 break;
3920 default:
3921 return 0;
3924 return immed_double_int_const (res, mode);
3927 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3928 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3930 /* Get the integer argument values in two forms:
3931 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3933 arg0 = INTVAL (op0);
3934 arg1 = INTVAL (op1);
3936 if (width < HOST_BITS_PER_WIDE_INT)
3938 arg0 &= GET_MODE_MASK (mode);
3939 arg1 &= GET_MODE_MASK (mode);
3941 arg0s = arg0;
3942 if (val_signbit_known_set_p (mode, arg0s))
3943 arg0s |= ~GET_MODE_MASK (mode);
3945 arg1s = arg1;
3946 if (val_signbit_known_set_p (mode, arg1s))
3947 arg1s |= ~GET_MODE_MASK (mode);
3949 else
3951 arg0s = arg0;
3952 arg1s = arg1;
3955 /* Compute the value of the arithmetic. */
3957 switch (code)
3959 case PLUS:
3960 val = arg0s + arg1s;
3961 break;
3963 case MINUS:
3964 val = arg0s - arg1s;
3965 break;
3967 case MULT:
3968 val = arg0s * arg1s;
3969 break;
3971 case DIV:
3972 if (arg1s == 0
3973 || ((unsigned HOST_WIDE_INT) arg0s
3974 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3975 && arg1s == -1))
3976 return 0;
3977 val = arg0s / arg1s;
3978 break;
3980 case MOD:
3981 if (arg1s == 0
3982 || ((unsigned HOST_WIDE_INT) arg0s
3983 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3984 && arg1s == -1))
3985 return 0;
3986 val = arg0s % arg1s;
3987 break;
3989 case UDIV:
3990 if (arg1 == 0
3991 || ((unsigned HOST_WIDE_INT) arg0s
3992 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3993 && arg1s == -1))
3994 return 0;
3995 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3996 break;
3998 case UMOD:
3999 if (arg1 == 0
4000 || ((unsigned HOST_WIDE_INT) arg0s
4001 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4002 && arg1s == -1))
4003 return 0;
4004 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4005 break;
4007 case AND:
4008 val = arg0 & arg1;
4009 break;
4011 case IOR:
4012 val = arg0 | arg1;
4013 break;
4015 case XOR:
4016 val = arg0 ^ arg1;
4017 break;
4019 case LSHIFTRT:
4020 case ASHIFT:
4021 case ASHIFTRT:
4022 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4023 the value is in range. We can't return any old value for
4024 out-of-range arguments because either the middle-end (via
4025 shift_truncation_mask) or the back-end might be relying on
4026 target-specific knowledge. Nor can we rely on
4027 shift_truncation_mask, since the shift might not be part of an
4028 ashlM3, lshrM3 or ashrM3 instruction. */
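/* For example, (ashift:QI (const_int 1) (const_int 12)) is folded only
   when the target truncates shift counts (12 % 8 == 4, giving 16);
   otherwise we punt and return 0.  */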
4029 if (SHIFT_COUNT_TRUNCATED)
4030 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4031 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4032 return 0;
4034 val = (code == ASHIFT
4035 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4036 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4038 /* Sign-extend the result for arithmetic right shifts. */
4039 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4040 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4041 break;
4043 case ROTATERT:
4044 if (arg1 < 0)
4045 return 0;
4047 arg1 %= width;
4048 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4049 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4050 break;
4052 case ROTATE:
4053 if (arg1 < 0)
4054 return 0;
4056 arg1 %= width;
4057 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4058 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4059 break;
4061 case COMPARE:
4062 /* Do nothing here. */
4063 return 0;
4065 case SMIN:
4066 val = arg0s <= arg1s ? arg0s : arg1s;
4067 break;
4069 case UMIN:
4070 val = ((unsigned HOST_WIDE_INT) arg0
4071 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4072 break;
4074 case SMAX:
4075 val = arg0s > arg1s ? arg0s : arg1s;
4076 break;
4078 case UMAX:
4079 val = ((unsigned HOST_WIDE_INT) arg0
4080 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4081 break;
4083 case SS_PLUS:
4084 case US_PLUS:
4085 case SS_MINUS:
4086 case US_MINUS:
4087 case SS_MULT:
4088 case US_MULT:
4089 case SS_DIV:
4090 case US_DIV:
4091 case SS_ASHIFT:
4092 case US_ASHIFT:
4093 /* ??? There are simplifications that can be done. */
4094 return 0;
4096 default:
4097 gcc_unreachable ();
4100 return gen_int_mode (val, mode);
4103 return NULL_RTX;
4108 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4109 PLUS or MINUS.
4111 Rather than test for specific cases, we do this by a brute-force method
4112 and do all possible simplifications until no more changes occur. Then
4113 we rebuild the operation. */
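/* For example, simplifying (minus (plus x (const_int 4)) (const_int 1))
   expands into the operand list { x:+, 4:+, 1:- }; the constants combine
   to 3 and the result is rebuilt as (plus x (const_int 3)).  */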
4115 struct simplify_plus_minus_op_data
4117 rtx op;
4118 short neg;
4121 static bool
4122 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4124 int result;
4126 result = (commutative_operand_precedence (y)
4127 - commutative_operand_precedence (x));
4128 if (result)
4129 return result > 0;
4131 /* Group together equal REGs to do more simplification. */
4132 if (REG_P (x) && REG_P (y))
4133 return REGNO (x) > REGNO (y);
4134 else
4135 return false;
4138 static rtx
4139 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4140 rtx op1)
4142 struct simplify_plus_minus_op_data ops[8];
4143 rtx result, tem;
4144 int n_ops = 2, input_ops = 2;
4145 int changed, n_constants = 0, canonicalized = 0;
4146 int i, j;
4148 memset (ops, 0, sizeof ops);
4150 /* Set up the two operands and then expand them until nothing has been
4151 changed. If we run out of room in our array, give up; this should
4152 almost never happen. */
4154 ops[0].op = op0;
4155 ops[0].neg = 0;
4156 ops[1].op = op1;
4157 ops[1].neg = (code == MINUS);
4161 changed = 0;
4163 for (i = 0; i < n_ops; i++)
4165 rtx this_op = ops[i].op;
4166 int this_neg = ops[i].neg;
4167 enum rtx_code this_code = GET_CODE (this_op);
4169 switch (this_code)
4171 case PLUS:
4172 case MINUS:
4173 if (n_ops == 7)
4174 return NULL_RTX;
4176 ops[n_ops].op = XEXP (this_op, 1);
4177 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4178 n_ops++;
4180 ops[i].op = XEXP (this_op, 0);
4181 input_ops++;
4182 changed = 1;
4183 canonicalized |= this_neg;
4184 break;
4186 case NEG:
4187 ops[i].op = XEXP (this_op, 0);
4188 ops[i].neg = ! this_neg;
4189 changed = 1;
4190 canonicalized = 1;
4191 break;
4193 case CONST:
4194 if (n_ops < 7
4195 && GET_CODE (XEXP (this_op, 0)) == PLUS
4196 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4197 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4199 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4200 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4201 ops[n_ops].neg = this_neg;
4202 n_ops++;
4203 changed = 1;
4204 canonicalized = 1;
4206 break;
4208 case NOT:
4209 /* ~a -> (-a - 1) */
4210 if (n_ops != 7)
4212 ops[n_ops].op = CONSTM1_RTX (mode);
4213 ops[n_ops++].neg = this_neg;
4214 ops[i].op = XEXP (this_op, 0);
4215 ops[i].neg = !this_neg;
4216 changed = 1;
4217 canonicalized = 1;
4219 break;
4221 case CONST_INT:
4222 n_constants++;
4223 if (this_neg)
4225 ops[i].op = neg_const_int (mode, this_op);
4226 ops[i].neg = 0;
4227 changed = 1;
4228 canonicalized = 1;
4230 break;
4232 default:
4233 break;
4237 while (changed);
4239 if (n_constants > 1)
4240 canonicalized = 1;
4242 gcc_assert (n_ops >= 2);
4244 /* If we only have two operands, we can avoid the loops. */
4245 if (n_ops == 2)
4247 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4248 rtx lhs, rhs;
4250 /* Get the two operands. Be careful with the order, especially for
4251 the cases where code == MINUS. */
4252 if (ops[0].neg && ops[1].neg)
4254 lhs = gen_rtx_NEG (mode, ops[0].op);
4255 rhs = ops[1].op;
4257 else if (ops[0].neg)
4259 lhs = ops[1].op;
4260 rhs = ops[0].op;
4262 else
4264 lhs = ops[0].op;
4265 rhs = ops[1].op;
4268 return simplify_const_binary_operation (code, mode, lhs, rhs);
4271 /* Now simplify each pair of operands until nothing changes. */
4274 /* Insertion sort is good enough for an eight-element array. */
4275 for (i = 1; i < n_ops; i++)
4277 struct simplify_plus_minus_op_data save;
4278 j = i - 1;
4279 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4280 continue;
4282 canonicalized = 1;
4283 save = ops[i];
4285 ops[j + 1] = ops[j];
4286 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4287 ops[j + 1] = save;
4290 changed = 0;
4291 for (i = n_ops - 1; i > 0; i--)
4292 for (j = i - 1; j >= 0; j--)
4294 rtx lhs = ops[j].op, rhs = ops[i].op;
4295 int lneg = ops[j].neg, rneg = ops[i].neg;
4297 if (lhs != 0 && rhs != 0)
4299 enum rtx_code ncode = PLUS;
4301 if (lneg != rneg)
4303 ncode = MINUS;
4304 if (lneg)
4305 tem = lhs, lhs = rhs, rhs = tem;
4307 else if (swap_commutative_operands_p (lhs, rhs))
4308 tem = lhs, lhs = rhs, rhs = tem;
4310 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4311 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4313 rtx tem_lhs, tem_rhs;
4315 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4316 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4317 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4319 if (tem && !CONSTANT_P (tem))
4320 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4322 else
4323 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4325 /* Reject "simplifications" that just wrap the two
4326 arguments in a CONST. Failure to do so can result
4327 in infinite recursion with simplify_binary_operation
4328 when it calls us to simplify CONST operations. */
4329 if (tem
4330 && ! (GET_CODE (tem) == CONST
4331 && GET_CODE (XEXP (tem, 0)) == ncode
4332 && XEXP (XEXP (tem, 0), 0) == lhs
4333 && XEXP (XEXP (tem, 0), 1) == rhs))
4335 lneg &= rneg;
4336 if (GET_CODE (tem) == NEG)
4337 tem = XEXP (tem, 0), lneg = !lneg;
4338 if (CONST_INT_P (tem) && lneg)
4339 tem = neg_const_int (mode, tem), lneg = 0;
4341 ops[i].op = tem;
4342 ops[i].neg = lneg;
4343 ops[j].op = NULL_RTX;
4344 changed = 1;
4345 canonicalized = 1;
4350 /* If nothing changed, fail. */
4351 if (!canonicalized)
4352 return NULL_RTX;
4354 /* Pack all the operands to the lower-numbered entries. */
4355 for (i = 0, j = 0; j < n_ops; j++)
4356 if (ops[j].op)
4358 ops[i] = ops[j];
4359 i++;
4361 n_ops = i;
4363 while (changed);
4365 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4366 if (n_ops == 2
4367 && CONST_INT_P (ops[1].op)
4368 && CONSTANT_P (ops[0].op)
4369 && ops[0].neg)
4370 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4372 /* We suppressed creation of trivial CONST expressions in the
4373 combination loop to avoid recursion. Create one manually now.
4374 The combination loop should have ensured that there is exactly
4375 one CONST_INT, and the sort will have ensured that it is last
4376 in the array and that any other constant will be next-to-last. */
4378 if (n_ops > 1
4379 && CONST_INT_P (ops[n_ops - 1].op)
4380 && CONSTANT_P (ops[n_ops - 2].op))
4382 rtx value = ops[n_ops - 1].op;
4383 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4384 value = neg_const_int (mode, value);
4385 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4386 INTVAL (value));
4387 n_ops--;
4390 /* Put a non-negated operand first, if possible. */
4392 for (i = 0; i < n_ops && ops[i].neg; i++)
4393 continue;
4394 if (i == n_ops)
4395 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4396 else if (i != 0)
4398 tem = ops[0].op;
4399 ops[0] = ops[i];
4400 ops[i].op = tem;
4401 ops[i].neg = 1;
4404 /* Now make the result by performing the requested operations. */
4405 result = ops[0].op;
4406 for (i = 1; i < n_ops; i++)
4407 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4408 mode, result, ops[i].op);
4410 return result;
4413 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4414 static bool
4415 plus_minus_operand_p (const_rtx x)
4417 return GET_CODE (x) == PLUS
4418 || GET_CODE (x) == MINUS
4419 || (GET_CODE (x) == CONST
4420 && GET_CODE (XEXP (x, 0)) == PLUS
4421 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4422 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4425 /* Like simplify_binary_operation except used for relational operators.
4426 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4427 not also be VOIDmode.
4429 CMP_MODE specifies the mode in which the comparison is done, so it is
4430 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4431 the operands or, if both are VOIDmode, the operands are compared in
4432 "infinite precision". */
4434 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4435 enum machine_mode cmp_mode, rtx op0, rtx op1)
4437 rtx tem, trueop0, trueop1;
4439 if (cmp_mode == VOIDmode)
4440 cmp_mode = GET_MODE (op0);
4441 if (cmp_mode == VOIDmode)
4442 cmp_mode = GET_MODE (op1);
4444 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4445 if (tem)
4447 if (SCALAR_FLOAT_MODE_P (mode))
4449 if (tem == const0_rtx)
4450 return CONST0_RTX (mode);
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4453 REAL_VALUE_TYPE val;
4454 val = FLOAT_STORE_FLAG_VALUE (mode);
4455 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4457 #else
4458 return NULL_RTX;
4459 #endif
4461 if (VECTOR_MODE_P (mode))
4463 if (tem == const0_rtx)
4464 return CONST0_RTX (mode);
4465 #ifdef VECTOR_STORE_FLAG_VALUE
4467 int i, units;
4468 rtvec v;
4470 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4471 if (val == NULL_RTX)
4472 return NULL_RTX;
4473 if (val == const1_rtx)
4474 return CONST1_RTX (mode);
4476 units = GET_MODE_NUNITS (mode);
4477 v = rtvec_alloc (units);
4478 for (i = 0; i < units; i++)
4479 RTVEC_ELT (v, i) = val;
4480 return gen_rtx_raw_CONST_VECTOR (mode, v);
4482 #else
4483 return NULL_RTX;
4484 #endif
4487 return tem;
4490 /* For the following tests, ensure const0_rtx is op1. */
4491 if (swap_commutative_operands_p (op0, op1)
4492 || (op0 == const0_rtx && op1 != const0_rtx))
4493 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4495 /* If op0 is a compare, extract the comparison arguments from it. */
4496 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4497 return simplify_gen_relational (code, mode, VOIDmode,
4498 XEXP (op0, 0), XEXP (op0, 1));
4500 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4501 || CC0_P (op0))
4502 return NULL_RTX;
4504 trueop0 = avoid_constant_pool_reference (op0);
4505 trueop1 = avoid_constant_pool_reference (op1);
4506 return simplify_relational_operation_1 (code, mode, cmp_mode,
4507 trueop0, trueop1);
4510 /* This part of simplify_relational_operation is only used when CMP_MODE
4511 is not in class MODE_CC (i.e. it is a real comparison).
4513 MODE is the mode of the result, while CMP_MODE specifies the mode
4514 in which the comparison is done, so it is the mode of the operands. */
4516 static rtx
4517 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4518 enum machine_mode cmp_mode, rtx op0, rtx op1)
4520 enum rtx_code op0code = GET_CODE (op0);
4522 if (op1 == const0_rtx && COMPARISON_P (op0))
4524 /* If op0 is a comparison, extract the comparison arguments
4525 from it. */
4526 if (code == NE)
4528 if (GET_MODE (op0) == mode)
4529 return simplify_rtx (op0);
4530 else
4531 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4532 XEXP (op0, 0), XEXP (op0, 1));
4534 else if (code == EQ)
4536 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4537 if (new_code != UNKNOWN)
4538 return simplify_gen_relational (new_code, mode, VOIDmode,
4539 XEXP (op0, 0), XEXP (op0, 1));
4543 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4544 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
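/* For example, (ltu (plus a (const_int 1)) (const_int 1)) becomes
   (geu a (const_int -1)): a + 1 wraps below 1 exactly when a is the
   all-ones value.  */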
4545 if ((code == LTU || code == GEU)
4546 && GET_CODE (op0) == PLUS
4547 && CONST_INT_P (XEXP (op0, 1))
4548 && (rtx_equal_p (op1, XEXP (op0, 0))
4549 || rtx_equal_p (op1, XEXP (op0, 1))))
4551 rtx new_cmp
4552 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4553 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4554 cmp_mode, XEXP (op0, 0), new_cmp);
4557 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4558 if ((code == LTU || code == GEU)
4559 && GET_CODE (op0) == PLUS
4560 && rtx_equal_p (op1, XEXP (op0, 1))
4561 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4562 && !rtx_equal_p (op1, XEXP (op0, 0)))
4563 return simplify_gen_relational (code, mode, cmp_mode, op0,
4564 copy_rtx (XEXP (op0, 0)));
4566 if (op1 == const0_rtx)
4568 /* Canonicalize (GTU x 0) as (NE x 0). */
4569 if (code == GTU)
4570 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4571 /* Canonicalize (LEU x 0) as (EQ x 0). */
4572 if (code == LEU)
4573 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4575 else if (op1 == const1_rtx)
4577 switch (code)
4579 case GE:
4580 /* Canonicalize (GE x 1) as (GT x 0). */
4581 return simplify_gen_relational (GT, mode, cmp_mode,
4582 op0, const0_rtx);
4583 case GEU:
4584 /* Canonicalize (GEU x 1) as (NE x 0). */
4585 return simplify_gen_relational (NE, mode, cmp_mode,
4586 op0, const0_rtx);
4587 case LT:
4588 /* Canonicalize (LT x 1) as (LE x 0). */
4589 return simplify_gen_relational (LE, mode, cmp_mode,
4590 op0, const0_rtx);
4591 case LTU:
4592 /* Canonicalize (LTU x 1) as (EQ x 0). */
4593 return simplify_gen_relational (EQ, mode, cmp_mode,
4594 op0, const0_rtx);
4595 default:
4596 break;
4599 else if (op1 == constm1_rtx)
4601 /* Canonicalize (LE x -1) as (LT x 0). */
4602 if (code == LE)
4603 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4604 /* Canonicalize (GT x -1) as (GE x 0). */
4605 if (code == GT)
4606 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4609 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
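/* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)).  */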
4610 if ((code == EQ || code == NE)
4611 && (op0code == PLUS || op0code == MINUS)
4612 && CONSTANT_P (op1)
4613 && CONSTANT_P (XEXP (op0, 1))
4614 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4616 rtx x = XEXP (op0, 0);
4617 rtx c = XEXP (op0, 1);
4618 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4619 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4621 /* Detect an infinite recursion condition, where we oscillate at this
4622 simplification case between:
4623 A + B == C <---> C - B == A,
4624 where A, B, and C are all constant but non-simplifiable expressions,
4625 usually SYMBOL_REFs. */
4626 if (GET_CODE (tem) == invcode
4627 && CONSTANT_P (x)
4628 && rtx_equal_p (c, XEXP (tem, 1)))
4629 return NULL_RTX;
4631 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4634 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4635 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4636 if (code == NE
4637 && op1 == const0_rtx
4638 && GET_MODE_CLASS (mode) == MODE_INT
4639 && cmp_mode != VOIDmode
4640 /* ??? Work-around BImode bugs in the ia64 backend. */
4641 && mode != BImode
4642 && cmp_mode != BImode
4643 && nonzero_bits (op0, cmp_mode) == 1
4644 && STORE_FLAG_VALUE == 1)
4645 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4646 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4647 : lowpart_subreg (mode, op0, cmp_mode);
4649 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4650 if ((code == EQ || code == NE)
4651 && op1 == const0_rtx
4652 && op0code == XOR)
4653 return simplify_gen_relational (code, mode, cmp_mode,
4654 XEXP (op0, 0), XEXP (op0, 1));
4656 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4657 if ((code == EQ || code == NE)
4658 && op0code == XOR
4659 && rtx_equal_p (XEXP (op0, 0), op1)
4660 && !side_effects_p (XEXP (op0, 0)))
4661 return simplify_gen_relational (code, mode, cmp_mode,
4662 XEXP (op0, 1), const0_rtx);
4664 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4665 if ((code == EQ || code == NE)
4666 && op0code == XOR
4667 && rtx_equal_p (XEXP (op0, 1), op1)
4668 && !side_effects_p (XEXP (op0, 1)))
4669 return simplify_gen_relational (code, mode, cmp_mode,
4670 XEXP (op0, 0), const0_rtx);
4672 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4673 if ((code == EQ || code == NE)
4674 && op0code == XOR
4675 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4676 && (CONST_INT_P (XEXP (op0, 1))
4677 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4678 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4679 simplify_gen_binary (XOR, cmp_mode,
4680 XEXP (op0, 1), op1));
4682 if (op0code == POPCOUNT && op1 == const0_rtx)
4683 switch (code)
4685 case EQ:
4686 case LE:
4687 case LEU:
4688 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4689 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4690 XEXP (op0, 0), const0_rtx);
4692 case NE:
4693 case GT:
4694 case GTU:
4695 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4696 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4697 XEXP (op0, 0), const0_rtx);
4699 default:
4700 break;
4703 return NULL_RTX;
4706 enum
4708 CMP_EQ = 1,
4709 CMP_LT = 2,
4710 CMP_GT = 4,
4711 CMP_LTU = 8,
4712 CMP_GTU = 16
4716 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4718 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4718 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4719 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4720 For floating-point comparisons, assume that the operands were ordered. */
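/* For example, if OP0 is known to be below OP1 both signed and unsigned,
   the mask is CMP_LT | CMP_LTU; comparison_result then returns
   const_true_rtx for LEU and const0_rtx for GE.  */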
4722 static rtx
4723 comparison_result (enum rtx_code code, int known_results)
4725 switch (code)
4727 case EQ:
4728 case UNEQ:
4729 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4730 case NE:
4731 case LTGT:
4732 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4734 case LT:
4735 case UNLT:
4736 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4737 case GE:
4738 case UNGE:
4739 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4741 case GT:
4742 case UNGT:
4743 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4744 case LE:
4745 case UNLE:
4746 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4748 case LTU:
4749 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4750 case GEU:
4751 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4753 case GTU:
4754 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4755 case LEU:
4756 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4758 case ORDERED:
4759 return const_true_rtx;
4760 case UNORDERED:
4761 return const0_rtx;
4762 default:
4763 gcc_unreachable ();
4767 /* Check if the given comparison (done in the given MODE) is actually a
4768 tautology or a contradiction.
4769 If no simplification is possible, this function returns zero.
4770 Otherwise, it returns either const_true_rtx or const0_rtx. */
4773 simplify_const_relational_operation (enum rtx_code code,
4774 enum machine_mode mode,
4775 rtx op0, rtx op1)
4777 rtx tem;
4778 rtx trueop0;
4779 rtx trueop1;
4781 gcc_assert (mode != VOIDmode
4782 || (GET_MODE (op0) == VOIDmode
4783 && GET_MODE (op1) == VOIDmode));
4785 /* If op0 is a compare, extract the comparison arguments from it. */
4786 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4788 op1 = XEXP (op0, 1);
4789 op0 = XEXP (op0, 0);
4791 if (GET_MODE (op0) != VOIDmode)
4792 mode = GET_MODE (op0);
4793 else if (GET_MODE (op1) != VOIDmode)
4794 mode = GET_MODE (op1);
4795 else
4796 return 0;
4799 /* We can't simplify MODE_CC values since we don't know what the
4800 actual comparison is. */
4801 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4802 return 0;
4804 /* Make sure the constant is second. */
4805 if (swap_commutative_operands_p (op0, op1))
4807 tem = op0, op0 = op1, op1 = tem;
4808 code = swap_condition (code);
4811 trueop0 = avoid_constant_pool_reference (op0);
4812 trueop1 = avoid_constant_pool_reference (op1);
4814 /* For integer comparisons of A and B maybe we can simplify A - B and can
4815 then simplify a comparison of that with zero. If A and B are both either
4816 a register or a CONST_INT, this can't help; testing for these cases will
4817 prevent infinite recursion here and speed things up.
4819 We can only do this for EQ and NE comparisons as otherwise we may
4820 lose or introduce overflow which we cannot disregard as undefined,
4821 since we do not know the signedness of the operation on either the
4822 left or the right hand side of the comparison. */
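/* For example, (ne (plus x (const_int 1)) (plus x (const_int 2)))
   subtracts to (const_int -1), and the resulting comparison with zero
   folds to const_true_rtx.  */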
4824 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4825 && (code == EQ || code == NE)
4826 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4827 && (REG_P (op1) || CONST_INT_P (trueop1)))
4828 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4829 /* We cannot do this if tem is a nonzero address. */
4830 && ! nonzero_address_p (tem))
4831 return simplify_const_relational_operation (signed_condition (code),
4832 mode, tem, const0_rtx);
4834 if (! HONOR_NANS (mode) && code == ORDERED)
4835 return const_true_rtx;
4837 if (! HONOR_NANS (mode) && code == UNORDERED)
4838 return const0_rtx;
4840 /* For modes without NaNs, if the two operands are equal, we know the
4841 result except if they have side-effects. Even with NaNs we know
4842 the result of unordered comparisons and, if signaling NaNs are
4843 irrelevant, also the result of LT/GT/LTGT. */
4844 if ((! HONOR_NANS (GET_MODE (trueop0))
4845 || code == UNEQ || code == UNLE || code == UNGE
4846 || ((code == LT || code == GT || code == LTGT)
4847 && ! HONOR_SNANS (GET_MODE (trueop0))))
4848 && rtx_equal_p (trueop0, trueop1)
4849 && ! side_effects_p (trueop0))
4850 return comparison_result (code, CMP_EQ);
4852 /* If the operands are floating-point constants, see if we can fold
4853 the result. */
4854 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4855 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4856 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4858 REAL_VALUE_TYPE d0, d1;
4860 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4861 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4863 /* Comparisons are unordered iff at least one of the values is NaN. */
4864 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4865 switch (code)
4867 case UNEQ:
4868 case UNLT:
4869 case UNGT:
4870 case UNLE:
4871 case UNGE:
4872 case NE:
4873 case UNORDERED:
4874 return const_true_rtx;
4875 case EQ:
4876 case LT:
4877 case GT:
4878 case LE:
4879 case GE:
4880 case LTGT:
4881 case ORDERED:
4882 return const0_rtx;
4883 default:
4884 return 0;
4887 return comparison_result (code,
4888 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4889 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4892 /* Otherwise, see if the operands are both integers. */
4893 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4894 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4895 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4897 int width = GET_MODE_PRECISION (mode);
4898 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4899 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4901 /* Get the two words comprising each integer constant. */
4902 if (CONST_DOUBLE_AS_INT_P (trueop0))
4904 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4905 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4907 else
4909 l0u = l0s = INTVAL (trueop0);
4910 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4913 if (CONST_DOUBLE_AS_INT_P (trueop1))
4915 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4916 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4918 else
4920 l1u = l1s = INTVAL (trueop1);
4921 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4924 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4925 we have to sign or zero-extend the values. */
4926 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4928 l0u &= GET_MODE_MASK (mode);
4929 l1u &= GET_MODE_MASK (mode);
4931 if (val_signbit_known_set_p (mode, l0s))
4932 l0s |= ~GET_MODE_MASK (mode);
4934 if (val_signbit_known_set_p (mode, l1s))
4935 l1s |= ~GET_MODE_MASK (mode);
4937 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4938 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4940 if (h0u == h1u && l0u == l1u)
4941 return comparison_result (code, CMP_EQ);
4942 else
4944 int cr;
4945 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4946 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4947 return comparison_result (code, cr);
4951 /* Optimize comparisons with upper and lower bounds. */
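/* For example, if nonzero_bits shows that OP0 fits in four bits,
   (gtu OP0 (const_int 15)) can never hold and folds to const0_rtx.  */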
4952 if (HWI_COMPUTABLE_MODE_P (mode)
4953 && CONST_INT_P (trueop1))
4955 int sign;
4956 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4957 HOST_WIDE_INT val = INTVAL (trueop1);
4958 HOST_WIDE_INT mmin, mmax;
4960 if (code == GEU
4961 || code == LEU
4962 || code == GTU
4963 || code == LTU)
4964 sign = 0;
4965 else
4966 sign = 1;
4968 /* Get a reduced range if the sign bit is zero. */
4969 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4971 mmin = 0;
4972 mmax = nonzero;
4974 else
4976 rtx mmin_rtx, mmax_rtx;
4977 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4979 mmin = INTVAL (mmin_rtx);
4980 mmax = INTVAL (mmax_rtx);
4981 if (sign)
4983 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4985 mmin >>= (sign_copies - 1);
4986 mmax >>= (sign_copies - 1);
4990 switch (code)
4992 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4993 case GEU:
4994 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4995 return const_true_rtx;
4996 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4997 return const0_rtx;
4998 break;
4999 case GE:
5000 if (val <= mmin)
5001 return const_true_rtx;
5002 if (val > mmax)
5003 return const0_rtx;
5004 break;
5006 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5007 case LEU:
5008 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5009 return const_true_rtx;
5010 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5011 return const0_rtx;
5012 break;
5013 case LE:
5014 if (val >= mmax)
5015 return const_true_rtx;
5016 if (val < mmin)
5017 return const0_rtx;
5018 break;
5020 case EQ:
5021 /* x == y is always false for y out of range. */
5022 if (val < mmin || val > mmax)
5023 return const0_rtx;
5024 break;
5026 /* x > y is always false for y >= mmax, always true for y < mmin. */
5027 case GTU:
5028 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5029 return const0_rtx;
5030 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5031 return const_true_rtx;
5032 break;
5033 case GT:
5034 if (val >= mmax)
5035 return const0_rtx;
5036 if (val < mmin)
5037 return const_true_rtx;
5038 break;
5040 /* x < y is always false for y <= mmin, always true for y > mmax. */
5041 case LTU:
5042 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5043 return const0_rtx;
5044 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5045 return const_true_rtx;
5046 break;
5047 case LT:
5048 if (val <= mmin)
5049 return const0_rtx;
5050 if (val > mmax)
5051 return const_true_rtx;
5052 break;
5054 case NE:
5055 /* x != y is always true for y out of range. */
5056 if (val < mmin || val > mmax)
5057 return const_true_rtx;
5058 break;
5060 default:
5061 break;
5065 /* Optimize integer comparisons with zero. */
5066 if (trueop1 == const0_rtx)
5068 /* Some addresses are known to be nonzero. We don't know
5069 their sign, but equality comparisons are known. */
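/* For example, (ne (symbol_ref ...) (const_int 0)) folds to
   const_true_rtx for a non-weak symbol.  */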
5070 if (nonzero_address_p (trueop0))
5072 if (code == EQ || code == LEU)
5073 return const0_rtx;
5074 if (code == NE || code == GTU)
5075 return const_true_rtx;
5078 /* See if the first operand is an IOR with a constant. If so, we
5079 may be able to determine the result of this comparison. */
5080 if (GET_CODE (op0) == IOR)
5082 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5083 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5085 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5086 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5087 && (UINTVAL (inner_const)
5088 & ((unsigned HOST_WIDE_INT) 1
5089 << sign_bitnum)));
5091 switch (code)
5093 case EQ:
5094 case LEU:
5095 return const0_rtx;
5096 case NE:
5097 case GTU:
5098 return const_true_rtx;
5099 case LT:
5100 case LE:
5101 if (has_sign)
5102 return const_true_rtx;
5103 break;
5104 case GT:
5105 case GE:
5106 if (has_sign)
5107 return const0_rtx;
5108 break;
5109 default:
5110 break;
5116 /* Optimize comparison of ABS with zero. */
5117 if (trueop1 == CONST0_RTX (mode)
5118 && (GET_CODE (trueop0) == ABS
5119 || (GET_CODE (trueop0) == FLOAT_EXTEND
5120 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5122 switch (code)
5124 case LT:
5125 /* Optimize abs(x) < 0.0. */
5126 if (!HONOR_SNANS (mode)
5127 && (!INTEGRAL_MODE_P (mode)
5128 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5130 if (INTEGRAL_MODE_P (mode)
5131 && (issue_strict_overflow_warning
5132 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5133 warning (OPT_Wstrict_overflow,
5134 ("assuming signed overflow does not occur when "
5135 "assuming abs (x) < 0 is false"));
5136 return const0_rtx;
5138 break;
5140 case GE:
5141 /* Optimize abs(x) >= 0.0. */
5142 if (!HONOR_NANS (mode)
5143 && (!INTEGRAL_MODE_P (mode)
5144 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5146 if (INTEGRAL_MODE_P (mode)
5147 && (issue_strict_overflow_warning
5148 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5149 warning (OPT_Wstrict_overflow,
5150 ("assuming signed overflow does not occur when "
5151 "assuming abs (x) >= 0 is true"));
5152 return const_true_rtx;
5154 break;
5156 case UNGE:
5157 /* Optimize ! (abs(x) < 0.0). */
5158 return const_true_rtx;
5160 default:
5161 break;
5165 return 0;
5168 /* Simplify CODE, an operation with result mode MODE and three operands,
5169 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5170 a constant. Return 0 if no simplification is possible. */
5173 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5174 enum machine_mode op0_mode, rtx op0, rtx op1,
5175 rtx op2)
5177 unsigned int width = GET_MODE_PRECISION (mode);
5178 bool any_change = false;
5179 rtx tem;
5181 /* VOIDmode means "infinite" precision. */
5182 if (width == 0)
5183 width = HOST_BITS_PER_WIDE_INT;
5185 switch (code)
5187 case FMA:
5188 /* Simplify negations around the multiplication. */
5189 /* -a * -b + c => a * b + c. */
5190 if (GET_CODE (op0) == NEG)
5192 tem = simplify_unary_operation (NEG, mode, op1, mode);
5193 if (tem)
5194 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5196 else if (GET_CODE (op1) == NEG)
5198 tem = simplify_unary_operation (NEG, mode, op0, mode);
5199 if (tem)
5200 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5203 /* Canonicalize the two multiplication operands. */
5204 /* a * -b + c => -b * a + c. */
5205 if (swap_commutative_operands_p (op0, op1))
5206 tem = op0, op0 = op1, op1 = tem, any_change = true;
5208 if (any_change)
5209 return gen_rtx_FMA (mode, op0, op1, op2);
5210 return NULL_RTX;
5212 case SIGN_EXTRACT:
5213 case ZERO_EXTRACT:
5214 if (CONST_INT_P (op0)
5215 && CONST_INT_P (op1)
5216 && CONST_INT_P (op2)
5217 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5218 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5220 /* Extracting a bit-field from a constant */
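/* For example, (zero_extract:SI (const_int 0xf0) (const_int 4)
   (const_int 4)) extracts bits 4..7 and yields (const_int 15)
   when !BITS_BIG_ENDIAN.  */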
5221 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5222 HOST_WIDE_INT op1val = INTVAL (op1);
5223 HOST_WIDE_INT op2val = INTVAL (op2);
5224 if (BITS_BIG_ENDIAN)
5225 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5226 else
5227 val >>= op2val;
5229 if (HOST_BITS_PER_WIDE_INT != op1val)
5231 /* First zero-extend. */
5232 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5233 /* If desired, propagate sign bit. */
5234 if (code == SIGN_EXTRACT
5235 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5236 != 0)
5237 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5240 return gen_int_mode (val, mode);
5242 break;
5244 case IF_THEN_ELSE:
5245 if (CONST_INT_P (op0))
5246 return op0 != const0_rtx ? op1 : op2;
5248 /* Convert c ? a : a into "a". */
5249 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5250 return op1;
5252 /* Convert a != b ? a : b into "a". */
5253 if (GET_CODE (op0) == NE
5254 && ! side_effects_p (op0)
5255 && ! HONOR_NANS (mode)
5256 && ! HONOR_SIGNED_ZEROS (mode)
5257 && ((rtx_equal_p (XEXP (op0, 0), op1)
5258 && rtx_equal_p (XEXP (op0, 1), op2))
5259 || (rtx_equal_p (XEXP (op0, 0), op2)
5260 && rtx_equal_p (XEXP (op0, 1), op1))))
5261 return op1;
5263 /* Convert a == b ? a : b into "b". */
5264 if (GET_CODE (op0) == EQ
5265 && ! side_effects_p (op0)
5266 && ! HONOR_NANS (mode)
5267 && ! HONOR_SIGNED_ZEROS (mode)
5268 && ((rtx_equal_p (XEXP (op0, 0), op1)
5269 && rtx_equal_p (XEXP (op0, 1), op2))
5270 || (rtx_equal_p (XEXP (op0, 0), op2)
5271 && rtx_equal_p (XEXP (op0, 1), op1))))
5272 return op2;
5274 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5276 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5277 ? GET_MODE (XEXP (op0, 1))
5278 : GET_MODE (XEXP (op0, 0)));
5279 rtx temp;
5281 /* Look for happy constants in op1 and op2. */
5282 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5284 HOST_WIDE_INT t = INTVAL (op1);
5285 HOST_WIDE_INT f = INTVAL (op2);
5287 if (t == STORE_FLAG_VALUE && f == 0)
5288 code = GET_CODE (op0);
5289 else if (t == 0 && f == STORE_FLAG_VALUE)
5291 enum rtx_code tmp;
5292 tmp = reversed_comparison_code (op0, NULL_RTX);
5293 if (tmp == UNKNOWN)
5294 break;
5295 code = tmp;
5297 else
5298 break;
5300 return simplify_gen_relational (code, mode, cmp_mode,
5301 XEXP (op0, 0), XEXP (op0, 1));
5304 if (cmp_mode == VOIDmode)
5305 cmp_mode = op0_mode;
5306 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5307 cmp_mode, XEXP (op0, 0),
5308 XEXP (op0, 1));
5310 /* See if any simplifications were possible. */
5311 if (temp)
5313 if (CONST_INT_P (temp))
5314 return temp == const0_rtx ? op2 : op1;
5315 else if (temp)
5316 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5319 break;
5321 case VEC_MERGE:
5322 gcc_assert (GET_MODE (op0) == mode);
5323 gcc_assert (GET_MODE (op1) == mode);
5324 gcc_assert (VECTOR_MODE_P (mode));
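/* With a constant mask, each set bit in OP2 selects the corresponding
   element from OP0 and each clear bit selects it from OP1; e.g. a mask
   of 0 simply returns OP1.  */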
5325 op2 = avoid_constant_pool_reference (op2);
5326 if (CONST_INT_P (op2))
5328 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5329 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5330 int mask = (1 << n_elts) - 1;
5332 if (!(INTVAL (op2) & mask))
5333 return op1;
5334 if ((INTVAL (op2) & mask) == mask)
5335 return op0;
5337 op0 = avoid_constant_pool_reference (op0);
5338 op1 = avoid_constant_pool_reference (op1);
5339 if (GET_CODE (op0) == CONST_VECTOR
5340 && GET_CODE (op1) == CONST_VECTOR)
5342 rtvec v = rtvec_alloc (n_elts);
5343 unsigned int i;
5345 for (i = 0; i < n_elts; i++)
5346 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5347 ? CONST_VECTOR_ELT (op0, i)
5348 : CONST_VECTOR_ELT (op1, i));
5349 return gen_rtx_CONST_VECTOR (mode, v);
5352 break;
5354 default:
5355 gcc_unreachable ();
5358 return 0;
5361 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5362 or CONST_VECTOR,
5363 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5365 Works by unpacking OP into a collection of 8-bit values
5366 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5367 and then repacking them again for OUTERMODE. */
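/* For example, on a little-endian target, (subreg:HI (const_int 0x12345678) 0)
   taken from SImode unpacks to the bytes 78 56 34 12 and repacks the low
   two bytes as (const_int 0x5678).  */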
5369 static rtx
5370 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5371 enum machine_mode innermode, unsigned int byte)
5373 /* We support up to 512-bit values (for V8DFmode). */
5374 enum {
5375 max_bitsize = 512,
5376 value_bit = 8,
5377 value_mask = (1 << value_bit) - 1
5379 unsigned char value[max_bitsize / value_bit];
5380 int value_start;
5381 int i;
5382 int elem;
5384 int num_elem;
5385 rtx * elems;
5386 int elem_bitsize;
5387 rtx result_s;
5388 rtvec result_v = NULL;
5389 enum mode_class outer_class;
5390 enum machine_mode outer_submode;
5392 /* Some ports misuse CCmode. */
5393 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5394 return op;
5396 /* We have no way to represent a complex constant at the rtl level. */
5397 if (COMPLEX_MODE_P (outermode))
5398 return NULL_RTX;
5400 /* Unpack the value. */
5402 if (GET_CODE (op) == CONST_VECTOR)
5404 num_elem = CONST_VECTOR_NUNITS (op);
5405 elems = &CONST_VECTOR_ELT (op, 0);
5406 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5408 else
5410 num_elem = 1;
5411 elems = &op;
5412 elem_bitsize = max_bitsize;
5414 /* If this asserts, it is too complicated; reducing value_bit may help. */
5415 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5416 /* I don't know how to handle endianness of sub-units. */
5417 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5419 for (elem = 0; elem < num_elem; elem++)
5421 unsigned char * vp;
5422 rtx el = elems[elem];
5424 /* Vectors are kept in target memory order. (This is probably
5425 a mistake.) */
5427 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5428 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5429 / BITS_PER_UNIT);
5430 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5431 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5432 unsigned bytele = (subword_byte % UNITS_PER_WORD
5433 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5434 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5437 switch (GET_CODE (el))
5439 case CONST_INT:
5440 for (i = 0;
5441 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5442 i += value_bit)
5443 *vp++ = INTVAL (el) >> i;
5444 /* CONST_INTs are always logically sign-extended. */
5445 for (; i < elem_bitsize; i += value_bit)
5446 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5447 break;
5449 case CONST_DOUBLE:
5450 if (GET_MODE (el) == VOIDmode)
5452 unsigned char extend = 0;
5453 /* If this triggers, someone should have generated a
5454 CONST_INT instead. */
5455 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5457 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5458 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5459 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5461 *vp++
5462 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5463 i += value_bit;
5466 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5467 extend = -1;
5468 for (; i < elem_bitsize; i += value_bit)
5469 *vp++ = extend;
5471 else
5473 long tmp[max_bitsize / 32];
5474 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5476 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5477 gcc_assert (bitsize <= elem_bitsize);
5478 gcc_assert (bitsize % value_bit == 0);
5480 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5481 GET_MODE (el));
5483 /* real_to_target produces its result in words affected by
5484 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5485 and use WORDS_BIG_ENDIAN instead; see the documentation
5486 of SUBREG in rtl.texi. */
5487 for (i = 0; i < bitsize; i += value_bit)
5489 int ibase;
5490 if (WORDS_BIG_ENDIAN)
5491 ibase = bitsize - 1 - i;
5492 else
5493 ibase = i;
5494 *vp++ = tmp[ibase / 32] >> i % 32;
5497 /* It shouldn't matter what's done here, so fill it with
5498 zero. */
5499 for (; i < elem_bitsize; i += value_bit)
5500 *vp++ = 0;
5502 break;
5504 case CONST_FIXED:
5505 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5507 for (i = 0; i < elem_bitsize; i += value_bit)
5508 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5510 else
5512 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5513 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5514 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5515 i += value_bit)
5516 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5517 >> (i - HOST_BITS_PER_WIDE_INT);
5518 for (; i < elem_bitsize; i += value_bit)
5519 *vp++ = 0;
5521 break;
5523 default:
5524 gcc_unreachable ();
5528 /* Now, pick the right byte to start with. */
5529 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5530 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5531 will already have offset 0. */
5532 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5534 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5535 - byte);
5536 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5537 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5538 byte = (subword_byte % UNITS_PER_WORD
5539 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5542 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5543 so if it's become negative it will instead be very large.) */
5544 gcc_assert (byte < GET_MODE_SIZE (innermode));
5546 /* Convert from bytes to chunks of size value_bit. */
5547 value_start = byte * (BITS_PER_UNIT / value_bit);
5549 /* Re-pack the value. */
5551 if (VECTOR_MODE_P (outermode))
5553 num_elem = GET_MODE_NUNITS (outermode);
5554 result_v = rtvec_alloc (num_elem);
5555 elems = &RTVEC_ELT (result_v, 0);
5556 outer_submode = GET_MODE_INNER (outermode);
5558 else
5560 num_elem = 1;
5561 elems = &result_s;
5562 outer_submode = outermode;
5565 outer_class = GET_MODE_CLASS (outer_submode);
5566 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5568 gcc_assert (elem_bitsize % value_bit == 0);
5569 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5571 for (elem = 0; elem < num_elem; elem++)
5573 unsigned char *vp;
5575 /* Vectors are stored in target memory order. (This is probably
5576 a mistake.) */
5578 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5579 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5580 / BITS_PER_UNIT);
5581 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5582 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5583 unsigned bytele = (subword_byte % UNITS_PER_WORD
5584 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5585 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5588 switch (outer_class)
5590 case MODE_INT:
5591 case MODE_PARTIAL_INT:
5593 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5595 for (i = 0;
5596 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5597 i += value_bit)
5598 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5599 for (; i < elem_bitsize; i += value_bit)
5600 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5601 << (i - HOST_BITS_PER_WIDE_INT);
5603 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5604 know why. */
5605 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5606 elems[elem] = gen_int_mode (lo, outer_submode);
5607 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5608 elems[elem] = immed_double_const (lo, hi, outer_submode);
5609 else
5610 return NULL_RTX;
5612 break;
5614 case MODE_FLOAT:
5615 case MODE_DECIMAL_FLOAT:
5617 REAL_VALUE_TYPE r;
5618 long tmp[max_bitsize / 32];
5620 /* real_from_target wants its input in words affected by
5621 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5622 and use WORDS_BIG_ENDIAN instead; see the documentation
5623 of SUBREG in rtl.texi. */
5624 for (i = 0; i < max_bitsize / 32; i++)
5625 tmp[i] = 0;
5626 for (i = 0; i < elem_bitsize; i += value_bit)
5628 int ibase;
5629 if (WORDS_BIG_ENDIAN)
5630 ibase = elem_bitsize - 1 - i;
5631 else
5632 ibase = i;
5633 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5636 real_from_target (&r, tmp, outer_submode);
5637 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5639 break;
5641 case MODE_FRACT:
5642 case MODE_UFRACT:
5643 case MODE_ACCUM:
5644 case MODE_UACCUM:
5646 FIXED_VALUE_TYPE f;
5647 f.data.low = 0;
5648 f.data.high = 0;
5649 f.mode = outer_submode;
5651 for (i = 0;
5652 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5653 i += value_bit)
5654 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5655 for (; i < elem_bitsize; i += value_bit)
5656 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5657 << (i - HOST_BITS_PER_WIDE_INT));
5659 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5661 break;
5663 default:
5664 gcc_unreachable ();
5667 if (VECTOR_MODE_P (outermode))
5668 return gen_rtx_CONST_VECTOR (outermode, result_v);
5669 else
5670 return result_s;
5673 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5674 Return 0 if no simplifications are possible. */
5676 simplify_subreg (enum machine_mode outermode, rtx op,
5677 enum machine_mode innermode, unsigned int byte)
5679 /* Little bit of sanity checking. */
5680 gcc_assert (innermode != VOIDmode);
5681 gcc_assert (outermode != VOIDmode);
5682 gcc_assert (innermode != BLKmode);
5683 gcc_assert (outermode != BLKmode);
5685 gcc_assert (GET_MODE (op) == innermode
5686 || GET_MODE (op) == VOIDmode);
5688 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5689 gcc_assert (byte < GET_MODE_SIZE (innermode));
5691 if (outermode == innermode && !byte)
5692 return op;
5694 if (CONST_INT_P (op)
5695 || CONST_DOUBLE_P (op)
5696 || GET_CODE (op) == CONST_FIXED
5697 || GET_CODE (op) == CONST_VECTOR)
5698 return simplify_immed_subreg (outermode, op, innermode, byte);
5700 /* Changing mode twice with SUBREG => just change it once,
5701 or not at all if changing back to op's starting mode. */
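/* For example, (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to
   (reg:SI x), and (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes the
   single subreg (subreg:QI (reg:SI x) 0).  */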
5702 if (GET_CODE (op) == SUBREG)
5704 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5705 int final_offset = byte + SUBREG_BYTE (op);
5706 rtx newx;
5708 if (outermode == innermostmode
5709 && byte == 0 && SUBREG_BYTE (op) == 0)
5710 return SUBREG_REG (op);
5712 /* The SUBREG_BYTE represents the offset, as if the value were stored
5713 in memory. An irritating exception is the paradoxical subreg, where
5714 we define SUBREG_BYTE to be 0. On big-endian machines, this value
5715 should really be negative. For a moment, undo this exception. */
5716 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5718 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5719 if (WORDS_BIG_ENDIAN)
5720 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5721 if (BYTES_BIG_ENDIAN)
5722 final_offset += difference % UNITS_PER_WORD;
5724 if (SUBREG_BYTE (op) == 0
5725 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5727 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5728 if (WORDS_BIG_ENDIAN)
5729 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5730 if (BYTES_BIG_ENDIAN)
5731 final_offset += difference % UNITS_PER_WORD;
5734 /* See whether resulting subreg will be paradoxical. */
5735 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5737 /* In nonparadoxical subregs we can't handle negative offsets. */
5738 if (final_offset < 0)
5739 return NULL_RTX;
5740 /* Bail out in case resulting subreg would be incorrect. */
5741 if (final_offset % GET_MODE_SIZE (outermode)
5742 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5743 return NULL_RTX;
5745 else
5747 int offset = 0;
5748 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5750 /* In a paradoxical subreg, see if we are still looking at the lower part.
5751 If so, our SUBREG_BYTE will be 0. */
5752 if (WORDS_BIG_ENDIAN)
5753 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5754 if (BYTES_BIG_ENDIAN)
5755 offset += difference % UNITS_PER_WORD;
5756 if (offset == final_offset)
5757 final_offset = 0;
5758 else
5759 return NULL_RTX;
5762 /* Recurse for further possible simplifications. */
5763 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5764 final_offset);
5765 if (newx)
5766 return newx;
5767 if (validate_subreg (outermode, innermostmode,
5768 SUBREG_REG (op), final_offset))
5770 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5771 if (SUBREG_PROMOTED_VAR_P (op)
5772 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5773 && GET_MODE_CLASS (outermode) == MODE_INT
5774 && IN_RANGE (GET_MODE_SIZE (outermode),
5775 GET_MODE_SIZE (innermode),
5776 GET_MODE_SIZE (innermostmode))
5777 && subreg_lowpart_p (newx))
5779 SUBREG_PROMOTED_VAR_P (newx) = 1;
5780 SUBREG_PROMOTED_UNSIGNED_SET
5781 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5783 return newx;
5785 return NULL_RTX;
5788 /* SUBREG of a hard register => just change the register number
5789 and/or mode. If the hard register is not valid in that mode,
5790 suppress this simplification. If the hard register is the stack,
5791 frame, or argument pointer, leave this as a SUBREG. */
5793 if (REG_P (op) && HARD_REGISTER_P (op))
5795 unsigned int regno, final_regno;
5797 regno = REGNO (op);
5798 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5799 if (HARD_REGISTER_NUM_P (final_regno))
5801 rtx x;
5802 int final_offset = byte;
5804 /* Adjust offset for paradoxical subregs. */
5805 if (byte == 0
5806 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5808 int difference = (GET_MODE_SIZE (innermode)
5809 - GET_MODE_SIZE (outermode));
5810 if (WORDS_BIG_ENDIAN)
5811 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5812 if (BYTES_BIG_ENDIAN)
5813 final_offset += difference % UNITS_PER_WORD;
5816 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5818 /* Propagate the original regno. We don't have any way to specify
5819 the offset inside the original regno, so do so only for the lowpart.
5820 The information is used only by alias analysis, which cannot
5821 grok partial registers anyway. */
5823 if (subreg_lowpart_offset (outermode, innermode) == byte)
5824 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5825 return x;

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
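
  /* For instance (illustrative only), (subreg:SI (mem:DI ADDR) 4) becomes
     (mem:SI (plus ADDR (const_int 4))) via adjust_address_nv, provided
     ADDR is not mode-dependent and the reference is not volatile (or no
     single instruction can move the whole DImode value).  */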

  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
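
  /* For instance (illustrative, assuming 4-byte SFmode), a subreg that
     selects the second half of a complex value,
         (subreg:SF (concat:SC (reg:SF RE) (reg:SF IM)) 4),
     reduces to the imaginary part (reg:SF IM).  */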

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND)
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
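
  /* For instance (illustrative, assuming a little-endian target with
     16-bit HImode and 64-bit DImode):
         (subreg:SI (zero_extend:DI (reg:HI X)) 4)
     reads bits 32..63, all of which lie above the 16 source bits, so it
     folds to (const_int 0).  */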

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
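
/* A typical use of simplify_gen_subreg (an illustrative sketch, not code
   taken from this file) is to extract the lowpart of a wider value and
   fall back to another strategy when no SUBREG can be formed:

     rtx lowpart = simplify_gen_subreg (SImode, val, DImode,
                                        subreg_lowpart_offset (SImode,
                                                               DImode));
     if (lowpart == NULL_RTX)
       ...handle the value some other way...
*/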

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

     1. fold_rtx in cse.c.  This code uses various CSE specific
        information to aid in RTL simplification.

     2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
        it uses combine specific information to aid in RTL
        simplification.

     3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

     1. Pore over fold_rtx & simplify_rtx and move any simplifications
        which are not pass dependent state into these routines.

     2. As code is moved by #1, change fold_rtx & simplify_rtx to
        use this routine whenever possible.

     3. Allow for pass dependent state to be provided to these
        routines and add simplifications based on the pass dependent
        state.  Remove code from cse.c & combine.c that becomes
        redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
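
/* An illustrative calling pattern (a sketch, not code from this file):
   callers try the simplifier and keep the original expression when it
   returns NULL:

     rtx tem = simplify_rtx (x);
     if (tem != NULL_RTX)
       x = tem;
*/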