gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
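/* Editorial illustration, not part of the original source: HWI_SIGN_EXTEND
   produces the high word that pairs with a given low word.

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT)  7)  ==  (HOST_WIDE_INT)  0

   so the (low, high) pair for -5 is (-5, -1) and the pair for 7 is (7, 0).  */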
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
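/* Worked example (editorial, not in the original source): in QImode the most
   negative value is -128; -(unsigned HOST_WIDE_INT) (-128) is 128, which
   gen_int_mode truncates to 8 bits and re-canonicalizes back to -128, so the
   overflow case wraps rather than misbehaving.  For any other value the
   negation is exact, e.g. neg_const_int on (const_int 5) gives (const_int -5).  */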
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
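/* Example (editorial): for SImode the precision is 32, so mode_signbit_p
   accepts exactly the constant with only bit 31 set,
   ((unsigned HOST_WIDE_INT) 1 << 31) after masking with the mode mask; any
   other SImode constant returns false.  */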
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
164 rtx
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
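/* Usage sketch (editorial): a caller building X + 4 writes
     simplify_gen_binary (PLUS, SImode, x, GEN_INT (4));
   if X is itself a constant the addition folds immediately; otherwise the
   commutative-operand canonicalization above keeps the constant second and
   the result is (plus:SI X (const_int 4)).  */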
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
185 rtx
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
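/* Behaviour sketch (editorial): for a MEM whose address is a SYMBOL_REF with
   CONSTANT_POOL_ADDRESS_P set and which reads the pool entry in its stored
   mode at offset 0, the pool constant itself is returned.  If the access uses
   a different mode or a nonzero in-range offset, the constant is reinterpreted
   through simplify_subreg, and the original MEM is returned if that fails.  */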
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
263 rtx
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
362 rtx
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
377 rtx
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
394 rtx
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 X, if it returns non-NULL, replace X with its return value and simplify the
410 result. */
412 rtx
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
560 rtx
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
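/* Usage sketch (editorial; the register number is made up): replacing
   (reg:SI 100) by (const_int 4) in (plus:SI (reg:SI 100) (const_int 1)) with
     simplify_replace_rtx (x, reg, GEN_INT (4));
   yields (const_int 5), because the rebuilt PLUS goes back through
   simplify_gen_binary and folds.  */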
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
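/* Editorial illustration of the convention described above: a caller that
   has proven nothing about OP and wants an SImode rvalue view of a DImode
   value simply writes
     simplify_gen_unary (TRUNCATE, SImode, op, DImode);
   and lets simplify_unary_operation degrade the TRUNCATE to a lowpart subreg
   only when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode proves that
   is safe.  */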
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
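/* Worked example (editorial): on a 32-bit little-endian target where DImode
   occupies two words, (truncate:SI (lshiftrt:DI X (const_int 32))) meets all
   of the tests above (32 >= BITS_PER_WORD, 2*32 <= 64, and the shift count is
   word-aligned), so it becomes (subreg:SI X 4), a direct reference to the
   upper word of X.  */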
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
783 rtx
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
862 if (STORE_FLAG_VALUE == -1
863 && GET_CODE (op) == ASHIFTRT
864 && CONST_INT_P (XEXP (op, 1))
865 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
866 return simplify_gen_relational (GE, mode, VOIDmode,
867 XEXP (op, 0), const0_rtx);
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && (GET_MODE_SIZE (GET_MODE (op))
873 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
874 && GET_CODE (SUBREG_REG (op)) == ASHIFT
875 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
877 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
878 rtx x;
880 x = gen_rtx_ROTATE (inner_mode,
881 simplify_gen_unary (NOT, inner_mode, const1_rtx,
882 inner_mode),
883 XEXP (SUBREG_REG (op), 1));
884 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
885 if (temp)
886 return temp;
889 /* Apply De Morgan's laws to reduce number of patterns for machines
890 with negating logical insns (and-not, nand, etc.). If result has
891 only one NOT, put it first, since that is how the patterns are
892 coded. */
894 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
896 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
897 enum machine_mode op_mode;
899 op_mode = GET_MODE (in1);
900 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
902 op_mode = GET_MODE (in2);
903 if (op_mode == VOIDmode)
904 op_mode = mode;
905 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
907 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
909 rtx tem = in2;
910 in2 = in1; in1 = tem;
913 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
914 mode, in1, in2);
916 break;
918 case NEG:
919 /* (neg (neg X)) == X. */
920 if (GET_CODE (op) == NEG)
921 return XEXP (op, 0);
923 /* (neg (plus X 1)) can become (not X). */
924 if (GET_CODE (op) == PLUS
925 && XEXP (op, 1) == const1_rtx)
926 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
928 /* Similarly, (neg (not X)) is (plus X 1). */
929 if (GET_CODE (op) == NOT)
930 return plus_constant (mode, XEXP (op, 0), 1);
932 /* (neg (minus X Y)) can become (minus Y X). This transformation
933 isn't safe for modes with signed zeros, since if X and Y are
934 both +0, (minus Y X) is the same as (minus X Y). If the
935 rounding mode is towards +infinity (or -infinity) then the two
936 expressions will be rounded differently. */
937 if (GET_CODE (op) == MINUS
938 && !HONOR_SIGNED_ZEROS (mode)
939 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
940 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
942 if (GET_CODE (op) == PLUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
946 /* (neg (plus A C)) is simplified to (minus -C A). */
947 if (CONST_SCALAR_INT_P (XEXP (op, 1))
948 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
950 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
951 if (temp)
952 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
955 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
956 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
960 /* (neg (mult A B)) becomes (mult A (neg B)).
961 This works even for floating-point values. */
962 if (GET_CODE (op) == MULT
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
965 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
966 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
969 /* NEG commutes with ASHIFT since it is multiplication. Only do
970 this if we can then eliminate the NEG (e.g., if the operand
971 is a constant). */
972 if (GET_CODE (op) == ASHIFT)
974 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
975 if (temp)
976 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
979 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
980 C is equal to the width of MODE minus 1. */
981 if (GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
984 return simplify_gen_binary (LSHIFTRT, mode,
985 XEXP (op, 0), XEXP (op, 1));
987 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
988 C is equal to the width of MODE minus 1. */
989 if (GET_CODE (op) == LSHIFTRT
990 && CONST_INT_P (XEXP (op, 1))
991 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
992 return simplify_gen_binary (ASHIFTRT, mode,
993 XEXP (op, 0), XEXP (op, 1));
995 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
996 if (GET_CODE (op) == XOR
997 && XEXP (op, 1) == const1_rtx
998 && nonzero_bits (XEXP (op, 0), mode) == 1)
999 return plus_constant (mode, XEXP (op, 0), -1);
1001 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1002 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1003 if (GET_CODE (op) == LT
1004 && XEXP (op, 1) == const0_rtx
1005 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1007 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1008 int isize = GET_MODE_PRECISION (inner);
1009 if (STORE_FLAG_VALUE == 1)
1011 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1019 else if (STORE_FLAG_VALUE == -1)
1021 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1022 GEN_INT (isize - 1));
1023 if (mode == inner)
1024 return temp;
1025 if (GET_MODE_PRECISION (mode) > isize)
1026 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1027 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1030 break;
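/* Worked example (editorial): with STORE_FLAG_VALUE == 1 and X in SImode
   (isize == 32), (neg:SI (lt:SI X (const_int 0))) becomes
   (ashiftrt:SI X (const_int 31)), which evaluates to -1 when X is negative
   and to 0 otherwise, exactly the negated comparison result.  */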
1032 case TRUNCATE:
1033 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1034 with the umulXi3_highpart patterns. */
1035 if (GET_CODE (op) == LSHIFTRT
1036 && GET_CODE (XEXP (op, 0)) == MULT)
1037 break;
1039 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1041 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1043 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1044 if (temp)
1045 return temp;
1047 /* We can't handle truncation to a partial integer mode here
1048 because we don't know the real bitsize of the partial
1049 integer mode. */
1050 break;
1053 if (GET_MODE (op) != VOIDmode)
1055 temp = simplify_truncation (mode, op, GET_MODE (op));
1056 if (temp)
1057 return temp;
1060 /* If we know that the value is already truncated, we can
1061 replace the TRUNCATE with a SUBREG. */
1062 if (GET_MODE_NUNITS (mode) == 1
1063 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1064 || truncated_to_mode (mode, op)))
1066 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1067 if (temp)
1068 return temp;
1071 /* A truncate of a comparison can be replaced with a subreg if
1072 STORE_FLAG_VALUE permits. This is like the previous test,
1073 but it works even if the comparison is done in a mode larger
1074 than HOST_BITS_PER_WIDE_INT. */
1075 if (HWI_COMPUTABLE_MODE_P (mode)
1076 && COMPARISON_P (op)
1077 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1079 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1080 if (temp)
1081 return temp;
1084 /* A truncate of a memory is just loading the low part of the memory
1085 if we are not changing the meaning of the address. */
1086 if (GET_CODE (op) == MEM
1087 && !VECTOR_MODE_P (mode)
1088 && !MEM_VOLATILE_P (op)
1089 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1091 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1092 if (temp)
1093 return temp;
1096 break;
1098 case FLOAT_TRUNCATE:
1099 if (DECIMAL_FLOAT_MODE_P (mode))
1100 break;
1102 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1103 if (GET_CODE (op) == FLOAT_EXTEND
1104 && GET_MODE (XEXP (op, 0)) == mode)
1105 return XEXP (op, 0);
1107 /* (float_truncate:SF (float_truncate:DF foo:XF))
1108 = (float_truncate:SF foo:XF).
1109 This may eliminate double rounding, so it is unsafe.
1111 (float_truncate:SF (float_extend:XF foo:DF))
1112 = (float_truncate:SF foo:DF).
1114 (float_truncate:DF (float_extend:XF foo:SF))
1115 = (float_extend:DF foo:SF). */
1116 if ((GET_CODE (op) == FLOAT_TRUNCATE
1117 && flag_unsafe_math_optimizations)
1118 || GET_CODE (op) == FLOAT_EXTEND)
1119 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1120 0)))
1121 > GET_MODE_SIZE (mode)
1122 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1123 mode,
1124 XEXP (op, 0), mode);
1126 /* (float_truncate (float x)) is (float x) */
1127 if (GET_CODE (op) == FLOAT
1128 && (flag_unsafe_math_optimizations
1129 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1130 && ((unsigned)significand_size (GET_MODE (op))
1131 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1132 - num_sign_bit_copies (XEXP (op, 0),
1133 GET_MODE (XEXP (op, 0))))))))
1134 return simplify_gen_unary (FLOAT, mode,
1135 XEXP (op, 0),
1136 GET_MODE (XEXP (op, 0)));
1138 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1139 (OP:SF foo:SF) if OP is NEG or ABS. */
1140 if ((GET_CODE (op) == ABS
1141 || GET_CODE (op) == NEG)
1142 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1143 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1144 return simplify_gen_unary (GET_CODE (op), mode,
1145 XEXP (XEXP (op, 0), 0), mode);
1147 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1148 is (float_truncate:SF x). */
1149 if (GET_CODE (op) == SUBREG
1150 && subreg_lowpart_p (op)
1151 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1152 return SUBREG_REG (op);
1153 break;
1155 case FLOAT_EXTEND:
1156 if (DECIMAL_FLOAT_MODE_P (mode))
1157 break;
1159 /* (float_extend (float_extend x)) is (float_extend x)
1161 (float_extend (float x)) is (float x) assuming that double
1162 rounding can't happen.
1163 */
1164 if (GET_CODE (op) == FLOAT_EXTEND
1165 || (GET_CODE (op) == FLOAT
1166 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1167 && ((unsigned)significand_size (GET_MODE (op))
1168 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1169 - num_sign_bit_copies (XEXP (op, 0),
1170 GET_MODE (XEXP (op, 0)))))))
1171 return simplify_gen_unary (GET_CODE (op), mode,
1172 XEXP (op, 0),
1173 GET_MODE (XEXP (op, 0)));
1175 break;
1177 case ABS:
1178 /* (abs (neg <foo>)) -> (abs <foo>) */
1179 if (GET_CODE (op) == NEG)
1180 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1181 GET_MODE (XEXP (op, 0)));
1183 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1184 do nothing. */
1185 if (GET_MODE (op) == VOIDmode)
1186 break;
1188 /* If operand is something known to be positive, ignore the ABS. */
1189 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1190 || val_signbit_known_clear_p (GET_MODE (op),
1191 nonzero_bits (op, GET_MODE (op))))
1192 return op;
1194 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1195 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1196 return gen_rtx_NEG (mode, op);
1198 break;
1200 case FFS:
1201 /* (ffs (*_extend <X>)) = (ffs <X>) */
1202 if (GET_CODE (op) == SIGN_EXTEND
1203 || GET_CODE (op) == ZERO_EXTEND)
1204 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1206 break;
1208 case POPCOUNT:
1209 switch (GET_CODE (op))
1211 case BSWAP:
1212 case ZERO_EXTEND:
1213 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1214 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1215 GET_MODE (XEXP (op, 0)));
1217 case ROTATE:
1218 case ROTATERT:
1219 /* Rotations don't affect popcount. */
1220 if (!side_effects_p (XEXP (op, 1)))
1221 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1222 GET_MODE (XEXP (op, 0)));
1223 break;
1225 default:
1226 break;
1228 break;
1230 case PARITY:
1231 switch (GET_CODE (op))
1233 case NOT:
1234 case BSWAP:
1235 case ZERO_EXTEND:
1236 case SIGN_EXTEND:
1237 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1238 GET_MODE (XEXP (op, 0)));
1240 case ROTATE:
1241 case ROTATERT:
1242 /* Rotations don't affect parity. */
1243 if (!side_effects_p (XEXP (op, 1)))
1244 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1246 break;
1248 default:
1249 break;
1251 break;
1253 case BSWAP:
1254 /* (bswap (bswap x)) -> x. */
1255 if (GET_CODE (op) == BSWAP)
1256 return XEXP (op, 0);
1257 break;
1259 case FLOAT:
1260 /* (float (sign_extend <X>)) = (float <X>). */
1261 if (GET_CODE (op) == SIGN_EXTEND)
1262 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1263 GET_MODE (XEXP (op, 0)));
1264 break;
1266 case SIGN_EXTEND:
1267 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1268 becomes just the MINUS if its mode is MODE. This allows
1269 folding switch statements on machines using casesi (such as
1270 the VAX). */
1271 if (GET_CODE (op) == TRUNCATE
1272 && GET_MODE (XEXP (op, 0)) == mode
1273 && GET_CODE (XEXP (op, 0)) == MINUS
1274 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1275 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1276 return XEXP (op, 0);
1278 /* Extending a widening multiplication should be canonicalized to
1279 a wider widening multiplication. */
1280 if (GET_CODE (op) == MULT)
1282 rtx lhs = XEXP (op, 0);
1283 rtx rhs = XEXP (op, 1);
1284 enum rtx_code lcode = GET_CODE (lhs);
1285 enum rtx_code rcode = GET_CODE (rhs);
1287 /* Widening multiplies usually extend both operands, but sometimes
1288 they use a shift to extract a portion of a register. */
1289 if ((lcode == SIGN_EXTEND
1290 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1291 && (rcode == SIGN_EXTEND
1292 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1294 enum machine_mode lmode = GET_MODE (lhs);
1295 enum machine_mode rmode = GET_MODE (rhs);
1296 int bits;
1298 if (lcode == ASHIFTRT)
1299 /* Number of bits not shifted off the end. */
1300 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1301 else /* lcode == SIGN_EXTEND */
1302 /* Size of inner mode. */
1303 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1305 if (rcode == ASHIFTRT)
1306 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1307 else /* rcode == SIGN_EXTEND */
1308 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1310 /* We can only widen multiplies if the result is mathematically
1311 equivalent. I.e. if overflow was impossible. */
1312 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1313 return simplify_gen_binary
1314 (MULT, mode,
1315 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1316 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
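/* Worked example (editorial): for
     (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
                              (sign_extend:SI (reg:HI b))))
   bits is 16 + 16 = 32, which does not exceed the SImode precision, so the
   SImode product cannot overflow and the expression is rebuilt as a DImode
   widening multiply; the nested extensions then fold to
   (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))).  */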
1320 /* Check for a sign extension of a subreg of a promoted
1321 variable, where the promotion is sign-extended, and the
1322 target mode is the same as the variable's promotion. */
1323 if (GET_CODE (op) == SUBREG
1324 && SUBREG_PROMOTED_VAR_P (op)
1325 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1326 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1328 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1329 if (temp)
1330 return temp;
1333 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1334 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1335 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1337 gcc_assert (GET_MODE_BITSIZE (mode)
1338 > GET_MODE_BITSIZE (GET_MODE (op)));
1339 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1343 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1344 is (sign_extend:M (subreg:O <X>)) if there is mode with
1345 GET_MODE_BITSIZE (N) - I bits.
1346 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1347 is similarly (zero_extend:M (subreg:O <X>)). */
1348 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1349 && GET_CODE (XEXP (op, 0)) == ASHIFT
1350 && CONST_INT_P (XEXP (op, 1))
1351 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1352 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1354 enum machine_mode tmode
1355 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1356 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1357 gcc_assert (GET_MODE_BITSIZE (mode)
1358 > GET_MODE_BITSIZE (GET_MODE (op)));
1359 if (tmode != BLKmode)
1361 rtx inner =
1362 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1363 if (inner)
1364 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1365 ? SIGN_EXTEND : ZERO_EXTEND,
1366 mode, inner, tmode);
1370 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1371 /* As we do not know which address space the pointer is referring to,
1372 we can do this only if the target does not support different pointer
1373 or address modes depending on the address space. */
1374 if (target_default_pointer_address_modes_p ()
1375 && ! POINTERS_EXTEND_UNSIGNED
1376 && mode == Pmode && GET_MODE (op) == ptr_mode
1377 && (CONSTANT_P (op)
1378 || (GET_CODE (op) == SUBREG
1379 && REG_P (SUBREG_REG (op))
1380 && REG_POINTER (SUBREG_REG (op))
1381 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1382 return convert_memory_address (Pmode, op);
1383 #endif
1384 break;
1386 case ZERO_EXTEND:
1387 /* Check for a zero extension of a subreg of a promoted
1388 variable, where the promotion is zero-extended, and the
1389 target mode is the same as the variable's promotion. */
1390 if (GET_CODE (op) == SUBREG
1391 && SUBREG_PROMOTED_VAR_P (op)
1392 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1393 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1395 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1396 if (temp)
1397 return temp;
1400 /* Extending a widening multiplication should be canonicalized to
1401 a wider widening multiplication. */
1402 if (GET_CODE (op) == MULT)
1404 rtx lhs = XEXP (op, 0);
1405 rtx rhs = XEXP (op, 1);
1406 enum rtx_code lcode = GET_CODE (lhs);
1407 enum rtx_code rcode = GET_CODE (rhs);
1409 /* Widening multiplies usually extend both operands, but sometimes
1410 they use a shift to extract a portion of a register. */
1411 if ((lcode == ZERO_EXTEND
1412 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1413 && (rcode == ZERO_EXTEND
1414 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1416 enum machine_mode lmode = GET_MODE (lhs);
1417 enum machine_mode rmode = GET_MODE (rhs);
1418 int bits;
1420 if (lcode == LSHIFTRT)
1421 /* Number of bits not shifted off the end. */
1422 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1423 else /* lcode == ZERO_EXTEND */
1424 /* Size of inner mode. */
1425 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1427 if (rcode == LSHIFTRT)
1428 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1429 else /* rcode == ZERO_EXTEND */
1430 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1432 /* We can only widen multiplies if the result is mathematically
1433 equivalent. I.e. if overflow was impossible. */
1434 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1435 return simplify_gen_binary
1436 (MULT, mode,
1437 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1438 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1442 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1443 if (GET_CODE (op) == ZERO_EXTEND)
1444 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1445 GET_MODE (XEXP (op, 0)));
1447 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1448 is (zero_extend:M (subreg:O <X>)) if there is mode with
1449 GET_MODE_BITSIZE (N) - I bits. */
1450 if (GET_CODE (op) == LSHIFTRT
1451 && GET_CODE (XEXP (op, 0)) == ASHIFT
1452 && CONST_INT_P (XEXP (op, 1))
1453 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1454 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1456 enum machine_mode tmode
1457 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1458 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1459 if (tmode != BLKmode)
1461 rtx inner =
1462 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1463 if (inner)
1464 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1468 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1469 /* As we do not know which address space the pointer is referring to,
1470 we can do this only if the target does not support different pointer
1471 or address modes depending on the address space. */
1472 if (target_default_pointer_address_modes_p ()
1473 && POINTERS_EXTEND_UNSIGNED > 0
1474 && mode == Pmode && GET_MODE (op) == ptr_mode
1475 && (CONSTANT_P (op)
1476 || (GET_CODE (op) == SUBREG
1477 && REG_P (SUBREG_REG (op))
1478 && REG_POINTER (SUBREG_REG (op))
1479 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1480 return convert_memory_address (Pmode, op);
1481 #endif
1482 break;
1484 default:
1485 break;
1488 return 0;
1491 /* Try to compute the value of a unary operation CODE whose output mode is to
1492 be MODE with input operand OP whose mode was originally OP_MODE.
1493 Return zero if the value cannot be computed. */
1494 rtx
1495 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1496 rtx op, enum machine_mode op_mode)
1498 unsigned int width = GET_MODE_PRECISION (mode);
1499 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1501 if (code == VEC_DUPLICATE)
1503 gcc_assert (VECTOR_MODE_P (mode));
1504 if (GET_MODE (op) != VOIDmode)
1506 if (!VECTOR_MODE_P (GET_MODE (op)))
1507 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1508 else
1509 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1510 (GET_MODE (op)));
1512 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1513 || GET_CODE (op) == CONST_VECTOR)
1515 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1516 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1517 rtvec v = rtvec_alloc (n_elts);
1518 unsigned int i;
1520 if (GET_CODE (op) != CONST_VECTOR)
1521 for (i = 0; i < n_elts; i++)
1522 RTVEC_ELT (v, i) = op;
1523 else
1525 enum machine_mode inmode = GET_MODE (op);
1526 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1527 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1529 gcc_assert (in_n_elts < n_elts);
1530 gcc_assert ((n_elts % in_n_elts) == 0);
1531 for (i = 0; i < n_elts; i++)
1532 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1534 return gen_rtx_CONST_VECTOR (mode, v);
1538 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1540 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1541 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1542 enum machine_mode opmode = GET_MODE (op);
1543 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1544 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1545 rtvec v = rtvec_alloc (n_elts);
1546 unsigned int i;
1548 gcc_assert (op_n_elts == n_elts);
1549 for (i = 0; i < n_elts; i++)
1551 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1552 CONST_VECTOR_ELT (op, i),
1553 GET_MODE_INNER (opmode));
1554 if (!x)
1555 return 0;
1556 RTVEC_ELT (v, i) = x;
1558 return gen_rtx_CONST_VECTOR (mode, v);
1561 /* The order of these tests is critical so that, for example, we don't
1562 check the wrong mode (input vs. output) for a conversion operation,
1563 such as FIX. At some point, this should be simplified. */
1565 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1567 HOST_WIDE_INT hv, lv;
1568 REAL_VALUE_TYPE d;
1570 if (CONST_INT_P (op))
1571 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1572 else
1573 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1575 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1576 d = real_value_truncate (mode, d);
1577 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1579 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1581 HOST_WIDE_INT hv, lv;
1582 REAL_VALUE_TYPE d;
1584 if (CONST_INT_P (op))
1585 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1586 else
1587 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1589 if (op_mode == VOIDmode
1590 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1591 /* We should never get a negative number. */
1592 gcc_assert (hv >= 0);
1593 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1594 hv = 0, lv &= GET_MODE_MASK (op_mode);
1596 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1597 d = real_value_truncate (mode, d);
1598 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1601 if (CONST_INT_P (op)
1602 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1604 HOST_WIDE_INT arg0 = INTVAL (op);
1605 HOST_WIDE_INT val;
1607 switch (code)
1609 case NOT:
1610 val = ~ arg0;
1611 break;
1613 case NEG:
1614 val = - arg0;
1615 break;
1617 case ABS:
1618 val = (arg0 >= 0 ? arg0 : - arg0);
1619 break;
1621 case FFS:
1622 arg0 &= GET_MODE_MASK (mode);
1623 val = ffs_hwi (arg0);
1624 break;
1626 case CLZ:
1627 arg0 &= GET_MODE_MASK (mode);
1628 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1629 ;
1630 else
1631 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1632 break;
1634 case CLRSB:
1635 arg0 &= GET_MODE_MASK (mode);
1636 if (arg0 == 0)
1637 val = GET_MODE_PRECISION (mode) - 1;
1638 else if (arg0 >= 0)
1639 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1640 else if (arg0 < 0)
1641 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1642 break;
1644 case CTZ:
1645 arg0 &= GET_MODE_MASK (mode);
1646 if (arg0 == 0)
1648 /* Even if the value at zero is undefined, we have to come
1649 up with some replacement. Seems good enough. */
1650 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1651 val = GET_MODE_PRECISION (mode);
1653 else
1654 val = ctz_hwi (arg0);
1655 break;
1657 case POPCOUNT:
1658 arg0 &= GET_MODE_MASK (mode);
1659 val = 0;
1660 while (arg0)
1661 val++, arg0 &= arg0 - 1;
1662 break;
1664 case PARITY:
1665 arg0 &= GET_MODE_MASK (mode);
1666 val = 0;
1667 while (arg0)
1668 val++, arg0 &= arg0 - 1;
1669 val &= 1;
1670 break;
1672 case BSWAP:
1674 unsigned int s;
1676 val = 0;
1677 for (s = 0; s < width; s += 8)
1679 unsigned int d = width - s - 8;
1680 unsigned HOST_WIDE_INT byte;
1681 byte = (arg0 >> s) & 0xff;
1682 val |= byte << d;
1685 break;
1687 case TRUNCATE:
1688 val = arg0;
1689 break;
1691 case ZERO_EXTEND:
1692 /* When zero-extending a CONST_INT, we need to know its
1693 original mode. */
1694 gcc_assert (op_mode != VOIDmode);
1695 if (op_width == HOST_BITS_PER_WIDE_INT)
1697 /* If we were really extending the mode,
1698 we would have to distinguish between zero-extension
1699 and sign-extension. */
1700 gcc_assert (width == op_width);
1701 val = arg0;
1703 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1704 val = arg0 & GET_MODE_MASK (op_mode);
1705 else
1706 return 0;
1707 break;
1709 case SIGN_EXTEND:
1710 if (op_mode == VOIDmode)
1711 op_mode = mode;
1712 op_width = GET_MODE_PRECISION (op_mode);
1713 if (op_width == HOST_BITS_PER_WIDE_INT)
1715 /* If we were really extending the mode,
1716 we would have to distinguish between zero-extension
1717 and sign-extension. */
1718 gcc_assert (width == op_width);
1719 val = arg0;
1721 else if (op_width < HOST_BITS_PER_WIDE_INT)
1723 val = arg0 & GET_MODE_MASK (op_mode);
1724 if (val_signbit_known_set_p (op_mode, val))
1725 val |= ~GET_MODE_MASK (op_mode);
1727 else
1728 return 0;
1729 break;
1731 case SQRT:
1732 case FLOAT_EXTEND:
1733 case FLOAT_TRUNCATE:
1734 case SS_TRUNCATE:
1735 case US_TRUNCATE:
1736 case SS_NEG:
1737 case US_NEG:
1738 case SS_ABS:
1739 return 0;
1741 default:
1742 gcc_unreachable ();
1745 return gen_int_mode (val, mode);
1748 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1749 for a DImode operation on a CONST_INT. */
1750 else if (width <= HOST_BITS_PER_DOUBLE_INT
1751 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1753 double_int first, value;
1755 if (CONST_DOUBLE_AS_INT_P (op))
1756 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1757 CONST_DOUBLE_LOW (op));
1758 else
1759 first = double_int::from_shwi (INTVAL (op));
1761 switch (code)
1763 case NOT:
1764 value = ~first;
1765 break;
1767 case NEG:
1768 value = -first;
1769 break;
1771 case ABS:
1772 if (first.is_negative ())
1773 value = -first;
1774 else
1775 value = first;
1776 break;
1778 case FFS:
1779 value.high = 0;
1780 if (first.low != 0)
1781 value.low = ffs_hwi (first.low);
1782 else if (first.high != 0)
1783 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1784 else
1785 value.low = 0;
1786 break;
1788 case CLZ:
1789 value.high = 0;
1790 if (first.high != 0)
1791 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1792 - HOST_BITS_PER_WIDE_INT;
1793 else if (first.low != 0)
1794 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1795 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1796 value.low = GET_MODE_PRECISION (mode);
1797 break;
1799 case CTZ:
1800 value.high = 0;
1801 if (first.low != 0)
1802 value.low = ctz_hwi (first.low);
1803 else if (first.high != 0)
1804 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1805 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1806 value.low = GET_MODE_PRECISION (mode);
1807 break;
1809 case POPCOUNT:
1810 value = double_int_zero;
1811 while (first.low)
1813 value.low++;
1814 first.low &= first.low - 1;
1816 while (first.high)
1818 value.low++;
1819 first.high &= first.high - 1;
1821 break;
1823 case PARITY:
1824 value = double_int_zero;
1825 while (first.low)
1827 value.low++;
1828 first.low &= first.low - 1;
1830 while (first.high)
1832 value.low++;
1833 first.high &= first.high - 1;
1835 value.low &= 1;
1836 break;
1838 case BSWAP:
1840 unsigned int s;
1842 value = double_int_zero;
1843 for (s = 0; s < width; s += 8)
1845 unsigned int d = width - s - 8;
1846 unsigned HOST_WIDE_INT byte;
1848 if (s < HOST_BITS_PER_WIDE_INT)
1849 byte = (first.low >> s) & 0xff;
1850 else
1851 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1853 if (d < HOST_BITS_PER_WIDE_INT)
1854 value.low |= byte << d;
1855 else
1856 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1859 break;
1861 case TRUNCATE:
1862 /* This is just a change-of-mode, so do nothing. */
1863 value = first;
1864 break;
1866 case ZERO_EXTEND:
1867 gcc_assert (op_mode != VOIDmode);
1869 if (op_width > HOST_BITS_PER_WIDE_INT)
1870 return 0;
1872 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1873 break;
1875 case SIGN_EXTEND:
1876 if (op_mode == VOIDmode
1877 || op_width > HOST_BITS_PER_WIDE_INT)
1878 return 0;
1879 else
1881 value.low = first.low & GET_MODE_MASK (op_mode);
1882 if (val_signbit_known_set_p (op_mode, value.low))
1883 value.low |= ~GET_MODE_MASK (op_mode);
1885 value.high = HWI_SIGN_EXTEND (value.low);
1887 break;
1889 case SQRT:
1890 return 0;
1892 default:
1893 return 0;
1896 return immed_double_int_const (value, mode);
1899 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 && SCALAR_FLOAT_MODE_P (mode)
1901 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1903 REAL_VALUE_TYPE d, t;
1904 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1906 switch (code)
1908 case SQRT:
1909 if (HONOR_SNANS (mode) && real_isnan (&d))
1910 return 0;
1911 real_sqrt (&t, mode, &d);
1912 d = t;
1913 break;
1914 case ABS:
1915 d = real_value_abs (&d);
1916 break;
1917 case NEG:
1918 d = real_value_negate (&d);
1919 break;
1920 case FLOAT_TRUNCATE:
1921 d = real_value_truncate (mode, d);
1922 break;
1923 case FLOAT_EXTEND:
1924 /* All this does is change the mode, unless it changes
1925 the mode class. */
1926 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1927 real_convert (&d, mode, &d);
1928 break;
1929 case FIX:
1930 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1931 break;
1932 case NOT:
1934 long tmp[4];
1935 int i;
1937 real_to_target (tmp, &d, GET_MODE (op));
1938 for (i = 0; i < 4; i++)
1939 tmp[i] = ~tmp[i];
1940 real_from_target (&d, tmp, mode);
1941 break;
1943 default:
1944 gcc_unreachable ();
1946 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1949 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1951 && GET_MODE_CLASS (mode) == MODE_INT
1952 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1954 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1955 operators are intentionally left unspecified (to ease implementation
1956 by target backends), for consistency, this routine implements the
1957 same semantics for constant folding as used by the middle-end. */
1959 /* This was formerly used only for non-IEEE float.
1960 eggert@twinsun.com says it is safe for IEEE also. */
1961 HOST_WIDE_INT xh, xl, th, tl;
1962 REAL_VALUE_TYPE x, t;
1963 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1964 switch (code)
1966 case FIX:
1967 if (REAL_VALUE_ISNAN (x))
1968 return const0_rtx;
1970 /* Test against the signed upper bound. */
1971 if (width > HOST_BITS_PER_WIDE_INT)
1973 th = ((unsigned HOST_WIDE_INT) 1
1974 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1975 tl = -1;
1977 else
1979 th = 0;
1980 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1982 real_from_integer (&t, VOIDmode, tl, th, 0);
1983 if (REAL_VALUES_LESS (t, x))
1985 xh = th;
1986 xl = tl;
1987 break;
1990 /* Test against the signed lower bound. */
1991 if (width > HOST_BITS_PER_WIDE_INT)
1993 th = (unsigned HOST_WIDE_INT) (-1)
1994 << (width - HOST_BITS_PER_WIDE_INT - 1);
1995 tl = 0;
1997 else
1999 th = -1;
2000 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2002 real_from_integer (&t, VOIDmode, tl, th, 0);
2003 if (REAL_VALUES_LESS (x, t))
2005 xh = th;
2006 xl = tl;
2007 break;
2009 REAL_VALUE_TO_INT (&xl, &xh, x);
2010 break;
2012 case UNSIGNED_FIX:
2013 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2014 return const0_rtx;
2016 /* Test against the unsigned upper bound. */
2017 if (width == HOST_BITS_PER_DOUBLE_INT)
2019 th = -1;
2020 tl = -1;
2022 else if (width >= HOST_BITS_PER_WIDE_INT)
2024 th = ((unsigned HOST_WIDE_INT) 1
2025 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2026 tl = -1;
2028 else
2030 th = 0;
2031 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2033 real_from_integer (&t, VOIDmode, tl, th, 1);
2034 if (REAL_VALUES_LESS (t, x))
2036 xh = th;
2037 xl = tl;
2038 break;
2041 REAL_VALUE_TO_INT (&xl, &xh, x);
2042 break;
2044 default:
2045 gcc_unreachable ();
2047 return immed_double_const (xl, xh, mode);
2050 return NULL_RTX;
2053 /* Subroutine of simplify_binary_operation to simplify a commutative,
2054 associative binary operation CODE with result mode MODE, operating
2055 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2056 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2057 canonicalization is possible. */
2059 static rtx
2060 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2061 rtx op0, rtx op1)
2063 rtx tem;
2065 /* Linearize the operator to the left. */
2066 if (GET_CODE (op1) == code)
2068 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2069 if (GET_CODE (op0) == code)
2071 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2072 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2075 /* "a op (b op c)" becomes "(b op c) op a". */
2076 if (! swap_commutative_operands_p (op1, op0))
2077 return simplify_gen_binary (code, mode, op1, op0);
2079 tem = op0;
2080 op0 = op1;
2081 op1 = tem;
2084 if (GET_CODE (op0) == code)
2086 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2087 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2089 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2090 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2093 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2094 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2095 if (tem != 0)
2096 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2098 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2099 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2100 if (tem != 0)
2101 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2104 return 0;
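/* Taken together, these rules typically turn an expression such as
   (plus y (plus x (const_int 3))) first into (plus (plus x 3) y) by the
   linearization above and then into (plus (plus x y) 3) by the
   constant-last canonicalization, so later folds can combine the
   constant with other constants.  */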
2108 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2109 and OP1. Return 0 if no simplification is possible.
2111 Don't use this for relational operations such as EQ or LT.
2112 Use simplify_relational_operation instead. */
2114 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2115 rtx op0, rtx op1)
2117 rtx trueop0, trueop1;
2118 rtx tem;
2120 /* Relational operations don't work here. We must know the mode
2121 of the operands in order to do the comparison correctly.
2122 Assuming a full word can give incorrect results.
2123 Consider comparing 128 with -128 in QImode. */
2124 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2125 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2127 /* Make sure the constant is second. */
2128 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2129 && swap_commutative_operands_p (op0, op1))
2131 tem = op0, op0 = op1, op1 = tem;
2134 trueop0 = avoid_constant_pool_reference (op0);
2135 trueop1 = avoid_constant_pool_reference (op1);
2137 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2138 if (tem)
2139 return tem;
2140 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2143 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2144 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2145 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2146 actual constants. */
2148 static rtx
2149 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2150 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2152 rtx tem, reversed, opleft, opright;
2153 HOST_WIDE_INT val;
2154 unsigned int width = GET_MODE_PRECISION (mode);
2156 /* Even if we can't compute a constant result,
2157 there are some cases worth simplifying. */
2159 switch (code)
2161 case PLUS:
2162 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2163 when x is NaN, infinite, or finite and nonzero. They aren't
2164 when x is -0 and the rounding mode is not towards -infinity,
2165 since (-0) + 0 is then 0. */
2166 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2167 return op0;
2169 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2170 transformations are safe even for IEEE. */
2171 if (GET_CODE (op0) == NEG)
2172 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2173 else if (GET_CODE (op1) == NEG)
2174 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2176 /* (~a) + 1 -> -a */
2177 if (INTEGRAL_MODE_P (mode)
2178 && GET_CODE (op0) == NOT
2179 && trueop1 == const1_rtx)
2180 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2182 /* Handle both-operands-constant cases. We can only add
2183 CONST_INTs to constants since the sum of relocatable symbols
2184 can't be handled by most assemblers. Don't add CONST_INT
2185 to CONST_INT since overflow won't be computed properly if wider
2186 than HOST_BITS_PER_WIDE_INT. */
2188 if ((GET_CODE (op0) == CONST
2189 || GET_CODE (op0) == SYMBOL_REF
2190 || GET_CODE (op0) == LABEL_REF)
2191 && CONST_INT_P (op1))
2192 return plus_constant (mode, op0, INTVAL (op1));
2193 else if ((GET_CODE (op1) == CONST
2194 || GET_CODE (op1) == SYMBOL_REF
2195 || GET_CODE (op1) == LABEL_REF)
2196 && CONST_INT_P (op0))
2197 return plus_constant (mode, op1, INTVAL (op0));
2199 /* See if this is something like X * C - X or vice versa or
2200 if the multiplication is written as a shift. If so, we can
2201 distribute and make a new multiply, shift, or maybe just
2202 have X (if C is 2 in the example above). But don't make
2203 something more expensive than we had before. */
2205 if (SCALAR_INT_MODE_P (mode))
2207 double_int coeff0, coeff1;
2208 rtx lhs = op0, rhs = op1;
2210 coeff0 = double_int_one;
2211 coeff1 = double_int_one;
2213 if (GET_CODE (lhs) == NEG)
2215 coeff0 = double_int_minus_one;
2216 lhs = XEXP (lhs, 0);
2218 else if (GET_CODE (lhs) == MULT
2219 && CONST_INT_P (XEXP (lhs, 1)))
2221 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2222 lhs = XEXP (lhs, 0);
2224 else if (GET_CODE (lhs) == ASHIFT
2225 && CONST_INT_P (XEXP (lhs, 1))
2226 && INTVAL (XEXP (lhs, 1)) >= 0
2227 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2229 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2230 lhs = XEXP (lhs, 0);
2233 if (GET_CODE (rhs) == NEG)
2235 coeff1 = double_int_minus_one;
2236 rhs = XEXP (rhs, 0);
2238 else if (GET_CODE (rhs) == MULT
2239 && CONST_INT_P (XEXP (rhs, 1)))
2241 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2242 rhs = XEXP (rhs, 0);
2244 else if (GET_CODE (rhs) == ASHIFT
2245 && CONST_INT_P (XEXP (rhs, 1))
2246 && INTVAL (XEXP (rhs, 1)) >= 0
2247 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2249 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2250 rhs = XEXP (rhs, 0);
2253 if (rtx_equal_p (lhs, rhs))
2255 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2256 rtx coeff;
2257 double_int val;
2258 bool speed = optimize_function_for_speed_p (cfun);
2260 val = coeff0 + coeff1;
2261 coeff = immed_double_int_const (val, mode);
2263 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2264 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2265 ? tem : 0;
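/* For instance, with op0 = (mult X (const_int 3)) and op1 = X the
   coefficients are 3 and 1, so the sum folds to (mult X (const_int 4))
   provided the new form is no more expensive than the original.  */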
2269 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2270 if (CONST_SCALAR_INT_P (op1)
2271 && GET_CODE (op0) == XOR
2272 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2273 && mode_signbit_p (mode, op1))
2274 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2275 simplify_gen_binary (XOR, mode, op1,
2276 XEXP (op0, 1)));
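/* The fold is valid because adding the sign bit cannot carry into any
   bit that survives in MODE, so it acts exactly like an XOR with the
   sign bit; e.g. in QImode (plus (xor X 0x12) 0x80) becomes
   (xor X 0x92).  */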
2278 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2279 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 && GET_CODE (op0) == MULT
2281 && GET_CODE (XEXP (op0, 0)) == NEG)
2283 rtx in1, in2;
2285 in1 = XEXP (XEXP (op0, 0), 0);
2286 in2 = XEXP (op0, 1);
2287 return simplify_gen_binary (MINUS, mode, op1,
2288 simplify_gen_binary (MULT, mode,
2289 in1, in2));
2292 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2293 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2294 is 1. */
2295 if (COMPARISON_P (op0)
2296 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2297 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2298 && (reversed = reversed_comparison (op0, mode)))
2299 return
2300 simplify_gen_unary (NEG, mode, reversed, mode);
2302 /* If one of the operands is a PLUS or a MINUS, see if we can
2303 simplify this by the associative law.
2304 Don't use the associative law for floating point.
2305 The inaccuracy makes it nonassociative,
2306 and subtle programs can break if operations are associated. */
2308 if (INTEGRAL_MODE_P (mode)
2309 && (plus_minus_operand_p (op0)
2310 || plus_minus_operand_p (op1))
2311 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2312 return tem;
2314 /* Reassociate floating point addition only when the user
2315 specifies associative math operations. */
2316 if (FLOAT_MODE_P (mode)
2317 && flag_associative_math)
2319 tem = simplify_associative_operation (code, mode, op0, op1);
2320 if (tem)
2321 return tem;
2323 break;
2325 case COMPARE:
2326 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2327 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2328 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2329 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2331 rtx xop00 = XEXP (op0, 0);
2332 rtx xop10 = XEXP (op1, 0);
2334 #ifdef HAVE_cc0
2335 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2336 #else
2337 if (REG_P (xop00) && REG_P (xop10)
2338 && GET_MODE (xop00) == GET_MODE (xop10)
2339 && REGNO (xop00) == REGNO (xop10)
2340 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2341 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2342 #endif
2343 return xop00;
2345 break;
2347 case MINUS:
2348 /* We can't assume x-x is 0 even with non-IEEE floating point,
2349 but since it is zero except in very strange circumstances, we
2350 will treat it as zero with -ffinite-math-only. */
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2354 return CONST0_RTX (mode);
2356 /* Change subtraction from zero into negation. (0 - x) is the
2357 same as -x when x is NaN, infinite, or finite and nonzero.
2358 But if the mode has signed zeros, and does not round towards
2359 -infinity, then 0 - 0 is 0, not -0. */
2360 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2361 return simplify_gen_unary (NEG, mode, op1, mode);
2363 /* (-1 - a) is ~a. */
2364 if (trueop0 == constm1_rtx)
2365 return simplify_gen_unary (NOT, mode, op1, mode);
2367 /* Subtracting 0 has no effect unless the mode has signed zeros
2368 and supports rounding towards -infinity. In such a case,
2369 0 - 0 is -0. */
2370 if (!(HONOR_SIGNED_ZEROS (mode)
2371 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2372 && trueop1 == CONST0_RTX (mode))
2373 return op0;
2375 /* See if this is something like X * C - X or vice versa or
2376 if the multiplication is written as a shift. If so, we can
2377 distribute and make a new multiply, shift, or maybe just
2378 have X (if C is 2 in the example above). But don't make
2379 something more expensive than we had before. */
2381 if (SCALAR_INT_MODE_P (mode))
2383 double_int coeff0, negcoeff1;
2384 rtx lhs = op0, rhs = op1;
2386 coeff0 = double_int_one;
2387 negcoeff1 = double_int_minus_one;
2389 if (GET_CODE (lhs) == NEG)
2391 coeff0 = double_int_minus_one;
2392 lhs = XEXP (lhs, 0);
2394 else if (GET_CODE (lhs) == MULT
2395 && CONST_INT_P (XEXP (lhs, 1)))
2397 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2398 lhs = XEXP (lhs, 0);
2400 else if (GET_CODE (lhs) == ASHIFT
2401 && CONST_INT_P (XEXP (lhs, 1))
2402 && INTVAL (XEXP (lhs, 1)) >= 0
2403 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2405 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2406 lhs = XEXP (lhs, 0);
2409 if (GET_CODE (rhs) == NEG)
2411 negcoeff1 = double_int_one;
2412 rhs = XEXP (rhs, 0);
2414 else if (GET_CODE (rhs) == MULT
2415 && CONST_INT_P (XEXP (rhs, 1)))
2417 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2418 rhs = XEXP (rhs, 0);
2420 else if (GET_CODE (rhs) == ASHIFT
2421 && CONST_INT_P (XEXP (rhs, 1))
2422 && INTVAL (XEXP (rhs, 1)) >= 0
2423 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2425 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2426 negcoeff1 = -negcoeff1;
2427 rhs = XEXP (rhs, 0);
2430 if (rtx_equal_p (lhs, rhs))
2432 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2433 rtx coeff;
2434 double_int val;
2435 bool speed = optimize_function_for_speed_p (cfun);
2437 val = coeff0 + negcoeff1;
2438 coeff = immed_double_int_const (val, mode);
2440 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2441 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2442 ? tem : 0;
2446 /* (a - (-b)) -> (a + b). True even for IEEE. */
2447 if (GET_CODE (op1) == NEG)
2448 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2450 /* (-x - c) may be simplified as (-c - x). */
2451 if (GET_CODE (op0) == NEG
2452 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2454 tem = simplify_unary_operation (NEG, mode, op1, mode);
2455 if (tem)
2456 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2459 /* Don't let a relocatable value get a negative coeff. */
2460 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2461 return simplify_gen_binary (PLUS, mode,
2462 op0,
2463 neg_const_int (mode, op1));
2465 /* (x - (x & y)) -> (x & ~y) */
2466 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2468 if (rtx_equal_p (op0, XEXP (op1, 0)))
2470 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2471 GET_MODE (XEXP (op1, 1)));
2472 return simplify_gen_binary (AND, mode, op0, tem);
2474 if (rtx_equal_p (op0, XEXP (op1, 1)))
2476 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2477 GET_MODE (XEXP (op1, 0)));
2478 return simplify_gen_binary (AND, mode, op0, tem);
2482 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2483 by reversing the comparison code if valid. */
2484 if (STORE_FLAG_VALUE == 1
2485 && trueop0 == const1_rtx
2486 && COMPARISON_P (op1)
2487 && (reversed = reversed_comparison (op1, mode)))
2488 return reversed;
2490 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2491 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2492 && GET_CODE (op1) == MULT
2493 && GET_CODE (XEXP (op1, 0)) == NEG)
2495 rtx in1, in2;
2497 in1 = XEXP (XEXP (op1, 0), 0);
2498 in2 = XEXP (op1, 1);
2499 return simplify_gen_binary (PLUS, mode,
2500 simplify_gen_binary (MULT, mode,
2501 in1, in2),
2502 op0);
2505 /* Canonicalize (minus (neg A) (mult B C)) to
2506 (minus (mult (neg B) C) A). */
2507 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2508 && GET_CODE (op1) == MULT
2509 && GET_CODE (op0) == NEG)
2511 rtx in1, in2;
2513 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2514 in2 = XEXP (op1, 1);
2515 return simplify_gen_binary (MINUS, mode,
2516 simplify_gen_binary (MULT, mode,
2517 in1, in2),
2518 XEXP (op0, 0));
2521 /* If one of the operands is a PLUS or a MINUS, see if we can
2522 simplify this by the associative law. This will, for example,
2523 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2524 Don't use the associative law for floating point.
2525 The inaccuracy makes it nonassociative,
2526 and subtle programs can break if operations are associated. */
2528 if (INTEGRAL_MODE_P (mode)
2529 && (plus_minus_operand_p (op0)
2530 || plus_minus_operand_p (op1))
2531 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2532 return tem;
2533 break;
2535 case MULT:
2536 if (trueop1 == constm1_rtx)
2537 return simplify_gen_unary (NEG, mode, op0, mode);
2539 if (GET_CODE (op0) == NEG)
2541 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2542 /* If op1 is a MULT as well and simplify_unary_operation
2543 just moved the NEG to the second operand, simplify_gen_binary
2544 below could, through simplify_associative_operation, move
2545 the NEG around again and recurse endlessly. */
2546 if (temp
2547 && GET_CODE (op1) == MULT
2548 && GET_CODE (temp) == MULT
2549 && XEXP (op1, 0) == XEXP (temp, 0)
2550 && GET_CODE (XEXP (temp, 1)) == NEG
2551 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2552 temp = NULL_RTX;
2553 if (temp)
2554 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2556 if (GET_CODE (op1) == NEG)
2558 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2559 /* If op0 is a MULT as well and simplify_unary_operation
2560 just moved the NEG to the second operand, simplify_gen_binary
2561 below could, through simplify_associative_operation, move
2562 the NEG around again and recurse endlessly. */
2563 if (temp
2564 && GET_CODE (op0) == MULT
2565 && GET_CODE (temp) == MULT
2566 && XEXP (op0, 0) == XEXP (temp, 0)
2567 && GET_CODE (XEXP (temp, 1)) == NEG
2568 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2569 temp = NULL_RTX;
2570 if (temp)
2571 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2575 x is NaN, since x * 0 is then also NaN. Nor is it valid
2576 when the mode has signed zeros, since multiplying a negative
2577 number by 0 will give -0, not 0. */
2578 if (!HONOR_NANS (mode)
2579 && !HONOR_SIGNED_ZEROS (mode)
2580 && trueop1 == CONST0_RTX (mode)
2581 && ! side_effects_p (op0))
2582 return op1;
2584 /* In IEEE floating point, x*1 is not equivalent to x for
2585 signalling NaNs. */
2586 if (!HONOR_SNANS (mode)
2587 && trueop1 == CONST1_RTX (mode))
2588 return op0;
2590 /* Convert multiply by constant power of two into shift unless
2591 we are still generating RTL. This test is a kludge. */
2592 if (CONST_INT_P (trueop1)
2593 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2594 /* If the mode is larger than the host word size, and the
2595 uppermost bit is set, then this isn't a power of two due
2596 to implicit sign extension. */
2597 && (width <= HOST_BITS_PER_WIDE_INT
2598 || val != HOST_BITS_PER_WIDE_INT - 1))
2599 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
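/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */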
2601 /* Likewise for multipliers wider than a word. */
2602 if (CONST_DOUBLE_AS_INT_P (trueop1)
2603 && GET_MODE (op0) == mode
2604 && CONST_DOUBLE_LOW (trueop1) == 0
2605 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2606 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2607 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2608 return simplify_gen_binary (ASHIFT, mode, op0,
2609 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2611 /* x*2 is x+x and x*(-1) is -x */
2612 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2613 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2614 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2615 && GET_MODE (op0) == mode)
2617 REAL_VALUE_TYPE d;
2618 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2620 if (REAL_VALUES_EQUAL (d, dconst2))
2621 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2623 if (!HONOR_SNANS (mode)
2624 && REAL_VALUES_EQUAL (d, dconstm1))
2625 return simplify_gen_unary (NEG, mode, op0, mode);
2628 /* Optimize -x * -x as x * x. */
2629 if (FLOAT_MODE_P (mode)
2630 && GET_CODE (op0) == NEG
2631 && GET_CODE (op1) == NEG
2632 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 && !side_effects_p (XEXP (op0, 0)))
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2636 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2637 if (SCALAR_FLOAT_MODE_P (mode)
2638 && GET_CODE (op0) == ABS
2639 && GET_CODE (op1) == ABS
2640 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2641 && !side_effects_p (XEXP (op0, 0)))
2642 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2644 /* Reassociate multiplication, but for floating point MULTs
2645 only when the user specifies unsafe math optimizations. */
2646 if (! FLOAT_MODE_P (mode)
2647 || flag_unsafe_math_optimizations)
2649 tem = simplify_associative_operation (code, mode, op0, op1);
2650 if (tem)
2651 return tem;
2653 break;
2655 case IOR:
2656 if (trueop1 == CONST0_RTX (mode))
2657 return op0;
2658 if (INTEGRAL_MODE_P (mode)
2659 && trueop1 == CONSTM1_RTX (mode)
2660 && !side_effects_p (op0))
2661 return op1;
2662 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 return op0;
2664 /* A | (~A) -> -1 */
2665 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2666 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2667 && ! side_effects_p (op0)
2668 && SCALAR_INT_MODE_P (mode))
2669 return constm1_rtx;
2671 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2672 if (CONST_INT_P (op1)
2673 && HWI_COMPUTABLE_MODE_P (mode)
2674 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2675 && !side_effects_p (op0))
2676 return op1;
2678 /* Canonicalize (X & C1) | C2. */
2679 if (GET_CODE (op0) == AND
2680 && CONST_INT_P (trueop1)
2681 && CONST_INT_P (XEXP (op0, 1)))
2683 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2684 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2685 HOST_WIDE_INT c2 = INTVAL (trueop1);
2687 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2688 if ((c1 & c2) == c1
2689 && !side_effects_p (XEXP (op0, 0)))
2690 return trueop1;
2692 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2693 if (((c1|c2) & mask) == mask)
2694 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2696 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2697 if (((c1 & ~c2) & mask) != (c1 & mask))
2699 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2700 gen_int_mode (c1 & ~c2, mode));
2701 return simplify_gen_binary (IOR, mode, tem, op1);
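/* For example, (ior (and X 0x0f) 0x03) becomes (ior (and X 0x0c) 0x03):
   the bits of C2 are forced to 1 by the IOR regardless of X, so they can
   be dropped from C1.  */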
2705 /* Convert (A & B) | A to A. */
2706 if (GET_CODE (op0) == AND
2707 && (rtx_equal_p (XEXP (op0, 0), op1)
2708 || rtx_equal_p (XEXP (op0, 1), op1))
2709 && ! side_effects_p (XEXP (op0, 0))
2710 && ! side_effects_p (XEXP (op0, 1)))
2711 return op1;
2713 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2714 mode size to (rotate A CX). */
2716 if (GET_CODE (op1) == ASHIFT
2717 || GET_CODE (op1) == SUBREG)
2719 opleft = op1;
2720 opright = op0;
2722 else
2724 opright = op1;
2725 opleft = op0;
2728 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2729 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2730 && CONST_INT_P (XEXP (opleft, 1))
2731 && CONST_INT_P (XEXP (opright, 1))
2732 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2733 == GET_MODE_PRECISION (mode)))
2734 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
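/* For example, in SImode (ior (ashift A (const_int 8))
   (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)).  */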
2736 /* Same, but for ashift that has been "simplified" to a wider mode
2737 by simplify_shift_const. */
2739 if (GET_CODE (opleft) == SUBREG
2740 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2741 && GET_CODE (opright) == LSHIFTRT
2742 && GET_CODE (XEXP (opright, 0)) == SUBREG
2743 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2744 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2745 && (GET_MODE_SIZE (GET_MODE (opleft))
2746 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2747 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2748 SUBREG_REG (XEXP (opright, 0)))
2749 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2750 && CONST_INT_P (XEXP (opright, 1))
2751 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2752 == GET_MODE_PRECISION (mode)))
2753 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2754 XEXP (SUBREG_REG (opleft), 1));
2756 /* If we have (ior (and X C1) C2), simplify this by making
2757 C1 as small as possible if C1 actually changes. */
2758 if (CONST_INT_P (op1)
2759 && (HWI_COMPUTABLE_MODE_P (mode)
2760 || INTVAL (op1) > 0)
2761 && GET_CODE (op0) == AND
2762 && CONST_INT_P (XEXP (op0, 1))
2763 && CONST_INT_P (op1)
2764 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2765 return simplify_gen_binary (IOR, mode,
2766 simplify_gen_binary
2767 (AND, mode, XEXP (op0, 0),
2768 GEN_INT (UINTVAL (XEXP (op0, 1))
2769 & ~UINTVAL (op1))),
2770 op1);
2772 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2773 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2774 the PLUS does not affect any of the bits in OP1: then we can do
2775 the IOR as a PLUS and we can associate. This is valid if OP1
2776 can be safely shifted left C bits. */
2777 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2778 && GET_CODE (XEXP (op0, 0)) == PLUS
2779 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2780 && CONST_INT_P (XEXP (op0, 1))
2781 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2783 int count = INTVAL (XEXP (op0, 1));
2784 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2786 if (mask >> count == INTVAL (trueop1)
2787 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2788 return simplify_gen_binary (ASHIFTRT, mode,
2789 plus_constant (mode, XEXP (op0, 0),
2790 mask),
2791 XEXP (op0, 1));
2794 tem = simplify_associative_operation (code, mode, op0, op1);
2795 if (tem)
2796 return tem;
2797 break;
2799 case XOR:
2800 if (trueop1 == CONST0_RTX (mode))
2801 return op0;
2802 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2803 return simplify_gen_unary (NOT, mode, op0, mode);
2804 if (rtx_equal_p (trueop0, trueop1)
2805 && ! side_effects_p (op0)
2806 && GET_MODE_CLASS (mode) != MODE_CC)
2807 return CONST0_RTX (mode);
2809 /* Canonicalize XOR of the most significant bit to PLUS. */
2810 if (CONST_SCALAR_INT_P (op1)
2811 && mode_signbit_p (mode, op1))
2812 return simplify_gen_binary (PLUS, mode, op0, op1);
2813 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2814 if (CONST_SCALAR_INT_P (op1)
2815 && GET_CODE (op0) == PLUS
2816 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2817 && mode_signbit_p (mode, XEXP (op0, 1)))
2818 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2819 simplify_gen_binary (XOR, mode, op1,
2820 XEXP (op0, 1)));
2822 /* If we are XORing two things that have no bits in common,
2823 convert them into an IOR. This helps to detect rotation encoded
2824 using those methods and possibly other simplifications. */
2826 if (HWI_COMPUTABLE_MODE_P (mode)
2827 && (nonzero_bits (op0, mode)
2828 & nonzero_bits (op1, mode)) == 0)
2829 return (simplify_gen_binary (IOR, mode, op0, op1));
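/* For example, if nonzero_bits shows that op0 can only set the high
   nibble of a byte and op1 only the low nibble, the two have no bits in
   common, so (xor op0 op1) computes the same value as (ior op0 op1).  */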
2831 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2832 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2833 (NOT y). */
2835 int num_negated = 0;
2837 if (GET_CODE (op0) == NOT)
2838 num_negated++, op0 = XEXP (op0, 0);
2839 if (GET_CODE (op1) == NOT)
2840 num_negated++, op1 = XEXP (op1, 0);
2842 if (num_negated == 2)
2843 return simplify_gen_binary (XOR, mode, op0, op1);
2844 else if (num_negated == 1)
2845 return simplify_gen_unary (NOT, mode,
2846 simplify_gen_binary (XOR, mode, op0, op1),
2847 mode);
2850 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2851 correspond to a machine insn or result in further simplifications
2852 if B is a constant. */
2854 if (GET_CODE (op0) == AND
2855 && rtx_equal_p (XEXP (op0, 1), op1)
2856 && ! side_effects_p (op1))
2857 return simplify_gen_binary (AND, mode,
2858 simplify_gen_unary (NOT, mode,
2859 XEXP (op0, 0), mode),
2860 op1);
2862 else if (GET_CODE (op0) == AND
2863 && rtx_equal_p (XEXP (op0, 0), op1)
2864 && ! side_effects_p (op1))
2865 return simplify_gen_binary (AND, mode,
2866 simplify_gen_unary (NOT, mode,
2867 XEXP (op0, 1), mode),
2868 op1);
2870 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2871 we can transform like this:
2872 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2873 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2874 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2875 Attempt a few simplifications when B and C are both constants. */
2876 if (GET_CODE (op0) == AND
2877 && CONST_INT_P (op1)
2878 && CONST_INT_P (XEXP (op0, 1)))
2880 rtx a = XEXP (op0, 0);
2881 rtx b = XEXP (op0, 1);
2882 rtx c = op1;
2883 HOST_WIDE_INT bval = INTVAL (b);
2884 HOST_WIDE_INT cval = INTVAL (c);
2886 rtx na_c
2887 = simplify_binary_operation (AND, mode,
2888 simplify_gen_unary (NOT, mode, a, mode),
2889 c);
2890 if ((~cval & bval) == 0)
2892 /* Try to simplify ~A&C | ~B&C. */
2893 if (na_c != NULL_RTX)
2894 return simplify_gen_binary (IOR, mode, na_c,
2895 GEN_INT (~bval & cval));
2897 else
2899 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2900 if (na_c == const0_rtx)
2902 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2903 GEN_INT (~cval & bval));
2904 return simplify_gen_binary (IOR, mode, a_nc_b,
2905 GEN_INT (~bval & cval));
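/* For example, with B = 0x0f and C = 0xff, B's bits are a subset of
   C's, and (xor (and A 0x0f) 0xff) is rewritten as the IOR of ~A & 0xff
   with the constant 0xf0, matching ~A&C | ~B&C from the identity
   above.  */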
2910 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2911 comparison if STORE_FLAG_VALUE is 1. */
2912 if (STORE_FLAG_VALUE == 1
2913 && trueop1 == const1_rtx
2914 && COMPARISON_P (op0)
2915 && (reversed = reversed_comparison (op0, mode)))
2916 return reversed;
2918 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2919 is (lt foo (const_int 0)), so we can perform the above
2920 simplification if STORE_FLAG_VALUE is 1. */
2922 if (STORE_FLAG_VALUE == 1
2923 && trueop1 == const1_rtx
2924 && GET_CODE (op0) == LSHIFTRT
2925 && CONST_INT_P (XEXP (op0, 1))
2926 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2927 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
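/* For example, in SImode with STORE_FLAG_VALUE == 1,
   (xor (lshiftrt X (const_int 31)) (const_int 1)) becomes
   (ge X (const_int 0)), since the shift isolates the sign bit.  */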
2929 /* (xor (comparison foo bar) (const_int sign-bit))
2930 when STORE_FLAG_VALUE is the sign bit. */
2931 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2932 && trueop1 == const_true_rtx
2933 && COMPARISON_P (op0)
2934 && (reversed = reversed_comparison (op0, mode)))
2935 return reversed;
2937 tem = simplify_associative_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2940 break;
2942 case AND:
2943 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2944 return trueop1;
2945 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2946 return op0;
2947 if (HWI_COMPUTABLE_MODE_P (mode))
2949 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2950 HOST_WIDE_INT nzop1;
2951 if (CONST_INT_P (trueop1))
2953 HOST_WIDE_INT val1 = INTVAL (trueop1);
2954 /* If we are turning off bits already known off in OP0, we need
2955 not do an AND. */
2956 if ((nzop0 & ~val1) == 0)
2957 return op0;
2959 nzop1 = nonzero_bits (trueop1, mode);
2960 /* If we are clearing all the nonzero bits, the result is zero. */
2961 if ((nzop1 & nzop0) == 0
2962 && !side_effects_p (op0) && !side_effects_p (op1))
2963 return CONST0_RTX (mode);
2965 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2966 && GET_MODE_CLASS (mode) != MODE_CC)
2967 return op0;
2968 /* A & (~A) -> 0 */
2969 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2970 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2971 && ! side_effects_p (op0)
2972 && GET_MODE_CLASS (mode) != MODE_CC)
2973 return CONST0_RTX (mode);
2975 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2976 there are no nonzero bits of C outside of X's mode. */
2977 if ((GET_CODE (op0) == SIGN_EXTEND
2978 || GET_CODE (op0) == ZERO_EXTEND)
2979 && CONST_INT_P (trueop1)
2980 && HWI_COMPUTABLE_MODE_P (mode)
2981 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2982 & UINTVAL (trueop1)) == 0)
2984 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2985 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2986 gen_int_mode (INTVAL (trueop1),
2987 imode));
2988 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2991 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2992 we might be able to further simplify the AND with X and potentially
2993 remove the truncation altogether. */
2994 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2996 rtx x = XEXP (op0, 0);
2997 enum machine_mode xmode = GET_MODE (x);
2998 tem = simplify_gen_binary (AND, xmode, x,
2999 gen_int_mode (INTVAL (trueop1), xmode));
3000 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3003 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3004 if (GET_CODE (op0) == IOR
3005 && CONST_INT_P (trueop1)
3006 && CONST_INT_P (XEXP (op0, 1)))
3008 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3009 return simplify_gen_binary (IOR, mode,
3010 simplify_gen_binary (AND, mode,
3011 XEXP (op0, 0), op1),
3012 gen_int_mode (tmp, mode));
3015 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3016 insn (and may simplify more). */
3017 if (GET_CODE (op0) == XOR
3018 && rtx_equal_p (XEXP (op0, 0), op1)
3019 && ! side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode,
3021 simplify_gen_unary (NOT, mode,
3022 XEXP (op0, 1), mode),
3023 op1);
3025 if (GET_CODE (op0) == XOR
3026 && rtx_equal_p (XEXP (op0, 1), op1)
3027 && ! side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode,
3029 simplify_gen_unary (NOT, mode,
3030 XEXP (op0, 0), mode),
3031 op1);
3033 /* Similarly for (~(A ^ B)) & A. */
3034 if (GET_CODE (op0) == NOT
3035 && GET_CODE (XEXP (op0, 0)) == XOR
3036 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3037 && ! side_effects_p (op1))
3038 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3040 if (GET_CODE (op0) == NOT
3041 && GET_CODE (XEXP (op0, 0)) == XOR
3042 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3043 && ! side_effects_p (op1))
3044 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3046 /* Convert (A | B) & A to A. */
3047 if (GET_CODE (op0) == IOR
3048 && (rtx_equal_p (XEXP (op0, 0), op1)
3049 || rtx_equal_p (XEXP (op0, 1), op1))
3050 && ! side_effects_p (XEXP (op0, 0))
3051 && ! side_effects_p (XEXP (op0, 1)))
3052 return op1;
3054 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3055 ((A & N) + B) & M -> (A + B) & M
3056 Similarly if (N & M) == 0,
3057 ((A | N) + B) & M -> (A + B) & M
3058 and for - instead of + and/or ^ instead of |.
3059 Also, if (N & M) == 0, then
3060 (A +- N) & M -> A & M. */
3061 if (CONST_INT_P (trueop1)
3062 && HWI_COMPUTABLE_MODE_P (mode)
3063 && ~UINTVAL (trueop1)
3064 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3065 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3067 rtx pmop[2];
3068 int which;
3070 pmop[0] = XEXP (op0, 0);
3071 pmop[1] = XEXP (op0, 1);
3073 if (CONST_INT_P (pmop[1])
3074 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3075 return simplify_gen_binary (AND, mode, pmop[0], op1);
3077 for (which = 0; which < 2; which++)
3079 tem = pmop[which];
3080 switch (GET_CODE (tem))
3082 case AND:
3083 if (CONST_INT_P (XEXP (tem, 1))
3084 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3085 == UINTVAL (trueop1))
3086 pmop[which] = XEXP (tem, 0);
3087 break;
3088 case IOR:
3089 case XOR:
3090 if (CONST_INT_P (XEXP (tem, 1))
3091 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3092 pmop[which] = XEXP (tem, 0);
3093 break;
3094 default:
3095 break;
3099 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3101 tem = simplify_gen_binary (GET_CODE (op0), mode,
3102 pmop[0], pmop[1]);
3103 return simplify_gen_binary (code, mode, tem, op1);
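/* For example, with M = 0xff and N = 0xff00 the transformation
   (and (plus (ior A 0xff00) B) 0xff) -> (and (plus A B) 0xff) is valid
   because carries in an addition only propagate upward, so bits of N
   above the mask can never influence the masked result.  */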
3107 /* (and X (ior (not X) Y)) -> (and X Y) */
3108 if (GET_CODE (op1) == IOR
3109 && GET_CODE (XEXP (op1, 0)) == NOT
3110 && op0 == XEXP (XEXP (op1, 0), 0))
3111 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3113 /* (and (ior (not X) Y) X) -> (and X Y) */
3114 if (GET_CODE (op0) == IOR
3115 && GET_CODE (XEXP (op0, 0)) == NOT
3116 && op1 == XEXP (XEXP (op0, 0), 0))
3117 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3119 tem = simplify_associative_operation (code, mode, op0, op1);
3120 if (tem)
3121 return tem;
3122 break;
3124 case UDIV:
3125 /* 0/x is 0 (or x&0 if x has side-effects). */
3126 if (trueop0 == CONST0_RTX (mode))
3128 if (side_effects_p (op1))
3129 return simplify_gen_binary (AND, mode, op1, trueop0);
3130 return trueop0;
3132 /* x/1 is x. */
3133 if (trueop1 == CONST1_RTX (mode))
3135 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 if (tem)
3137 return tem;
3139 /* Convert divide by power of two into shift. */
3140 if (CONST_INT_P (trueop1)
3141 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3142 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
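/* For example, (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)).  */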
3143 break;
3145 case DIV:
3146 /* Handle floating point and integers separately. */
3147 if (SCALAR_FLOAT_MODE_P (mode))
3149 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3150 safe for modes with NaNs, since 0.0 / 0.0 will then be
3151 NaN rather than 0.0. Nor is it safe for modes with signed
3152 zeros, since dividing 0 by a negative number gives -0.0 */
3153 if (trueop0 == CONST0_RTX (mode)
3154 && !HONOR_NANS (mode)
3155 && !HONOR_SIGNED_ZEROS (mode)
3156 && ! side_effects_p (op1))
3157 return op0;
3158 /* x/1.0 is x. */
3159 if (trueop1 == CONST1_RTX (mode)
3160 && !HONOR_SNANS (mode))
3161 return op0;
3163 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3164 && trueop1 != CONST0_RTX (mode))
3166 REAL_VALUE_TYPE d;
3167 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3169 /* x/-1.0 is -x. */
3170 if (REAL_VALUES_EQUAL (d, dconstm1)
3171 && !HONOR_SNANS (mode))
3172 return simplify_gen_unary (NEG, mode, op0, mode);
3174 /* Change FP division by a constant into multiplication.
3175 Only do this with -freciprocal-math. */
3176 if (flag_reciprocal_math
3177 && !REAL_VALUES_EQUAL (d, dconst0))
3179 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3180 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3181 return simplify_gen_binary (MULT, mode, op0, tem);
3185 else if (SCALAR_INT_MODE_P (mode))
3187 /* 0/x is 0 (or x&0 if x has side-effects). */
3188 if (trueop0 == CONST0_RTX (mode)
3189 && !cfun->can_throw_non_call_exceptions)
3191 if (side_effects_p (op1))
3192 return simplify_gen_binary (AND, mode, op1, trueop0);
3193 return trueop0;
3195 /* x/1 is x. */
3196 if (trueop1 == CONST1_RTX (mode))
3198 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3199 if (tem)
3200 return tem;
3202 /* x/-1 is -x. */
3203 if (trueop1 == constm1_rtx)
3205 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3206 if (x)
3207 return simplify_gen_unary (NEG, mode, x, mode);
3210 break;
3212 case UMOD:
3213 /* 0%x is 0 (or x&0 if x has side-effects). */
3214 if (trueop0 == CONST0_RTX (mode))
3216 if (side_effects_p (op1))
3217 return simplify_gen_binary (AND, mode, op1, trueop0);
3218 return trueop0;
3220 /* x%1 is 0 (or x&0 if x has side-effects). */
3221 if (trueop1 == CONST1_RTX (mode))
3223 if (side_effects_p (op0))
3224 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3225 return CONST0_RTX (mode);
3227 /* Implement modulus by power of two as AND. */
3228 if (CONST_INT_P (trueop1)
3229 && exact_log2 (UINTVAL (trueop1)) > 0)
3230 return simplify_gen_binary (AND, mode, op0,
3231 GEN_INT (INTVAL (op1) - 1));
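/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */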
3232 break;
3234 case MOD:
3235 /* 0%x is 0 (or x&0 if x has side-effects). */
3236 if (trueop0 == CONST0_RTX (mode))
3238 if (side_effects_p (op1))
3239 return simplify_gen_binary (AND, mode, op1, trueop0);
3240 return trueop0;
3242 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3243 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3245 if (side_effects_p (op0))
3246 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3247 return CONST0_RTX (mode);
3249 break;
3251 case ROTATERT:
3252 case ROTATE:
3253 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3254 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3255 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3256 amount instead. */
3257 if (CONST_INT_P (trueop1)
3258 && IN_RANGE (INTVAL (trueop1),
3259 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3260 GET_MODE_BITSIZE (mode) - 1))
3261 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3262 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3263 - INTVAL (trueop1)));
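/* For example, in SImode (rotatert A (const_int 24)) is canonicalized
   to (rotate A (const_int 8)), and a rotate by exactly half the bitsize
   (16) is expressed as ROTATE rather than ROTATERT.  */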
3264 /* FALLTHRU */
3265 case ASHIFTRT:
3266 if (trueop1 == CONST0_RTX (mode))
3267 return op0;
3268 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3269 return op0;
3270 /* Rotating ~0 always results in ~0. */
3271 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3272 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3273 && ! side_effects_p (op1))
3274 return op0;
3275 canonicalize_shift:
3276 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3278 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3279 if (val != INTVAL (op1))
3280 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3282 break;
3284 case ASHIFT:
3285 case SS_ASHIFT:
3286 case US_ASHIFT:
3287 if (trueop1 == CONST0_RTX (mode))
3288 return op0;
3289 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3290 return op0;
3291 goto canonicalize_shift;
3293 case LSHIFTRT:
3294 if (trueop1 == CONST0_RTX (mode))
3295 return op0;
3296 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3297 return op0;
3298 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3299 if (GET_CODE (op0) == CLZ
3300 && CONST_INT_P (trueop1)
3301 && STORE_FLAG_VALUE == 1
3302 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3304 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3305 unsigned HOST_WIDE_INT zero_val = 0;
3307 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3308 && zero_val == GET_MODE_PRECISION (imode)
3309 && INTVAL (trueop1) == exact_log2 (zero_val))
3310 return simplify_gen_relational (EQ, mode, imode,
3311 XEXP (op0, 0), const0_rtx);
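/* For example, if CLZ of zero is defined to be the mode precision
   (say 32 for SImode), then (lshiftrt (clz X) (const_int 5)) is 1
   exactly when X is zero, so it folds to (eq X (const_int 0)).  */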
3313 goto canonicalize_shift;
3315 case SMIN:
3316 if (width <= HOST_BITS_PER_WIDE_INT
3317 && mode_signbit_p (mode, trueop1)
3318 && ! side_effects_p (op0))
3319 return op1;
3320 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3321 return op0;
3322 tem = simplify_associative_operation (code, mode, op0, op1);
3323 if (tem)
3324 return tem;
3325 break;
3327 case SMAX:
3328 if (width <= HOST_BITS_PER_WIDE_INT
3329 && CONST_INT_P (trueop1)
3330 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3331 && ! side_effects_p (op0))
3332 return op1;
3333 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3334 return op0;
3335 tem = simplify_associative_operation (code, mode, op0, op1);
3336 if (tem)
3337 return tem;
3338 break;
3340 case UMIN:
3341 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3342 return op1;
3343 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3344 return op0;
3345 tem = simplify_associative_operation (code, mode, op0, op1);
3346 if (tem)
3347 return tem;
3348 break;
3350 case UMAX:
3351 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3352 return op1;
3353 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3354 return op0;
3355 tem = simplify_associative_operation (code, mode, op0, op1);
3356 if (tem)
3357 return tem;
3358 break;
3360 case SS_PLUS:
3361 case US_PLUS:
3362 case SS_MINUS:
3363 case US_MINUS:
3364 case SS_MULT:
3365 case US_MULT:
3366 case SS_DIV:
3367 case US_DIV:
3368 /* ??? There are simplifications that can be done. */
3369 return 0;
3371 case VEC_SELECT:
3372 if (!VECTOR_MODE_P (mode))
3374 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3375 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3376 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3377 gcc_assert (XVECLEN (trueop1, 0) == 1);
3378 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3380 if (GET_CODE (trueop0) == CONST_VECTOR)
3381 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3382 (trueop1, 0, 0)));
3384 /* Extract a scalar element from a nested VEC_SELECT expression
3385 (with optional nested VEC_CONCAT expression). Some targets
3386 (i386) extract scalar element from a vector using chain of
3387 nested VEC_SELECT expressions. When the input operand is a memory
3388 operand, this operation can be simplified to a simple scalar
3389 load from a suitably offset memory address. */
3390 if (GET_CODE (trueop0) == VEC_SELECT)
3392 rtx op0 = XEXP (trueop0, 0);
3393 rtx op1 = XEXP (trueop0, 1);
3395 enum machine_mode opmode = GET_MODE (op0);
3396 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3397 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3399 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3400 int elem;
3402 rtvec vec;
3403 rtx tmp_op, tmp;
3405 gcc_assert (GET_CODE (op1) == PARALLEL);
3406 gcc_assert (i < n_elts);
3408 /* Select the element pointed to by the nested selector. */
3409 elem = INTVAL (XVECEXP (op1, 0, i));
3411 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3412 if (GET_CODE (op0) == VEC_CONCAT)
3414 rtx op00 = XEXP (op0, 0);
3415 rtx op01 = XEXP (op0, 1);
3417 enum machine_mode mode00, mode01;
3418 int n_elts00, n_elts01;
3420 mode00 = GET_MODE (op00);
3421 mode01 = GET_MODE (op01);
3423 /* Find out number of elements of each operand. */
3424 if (VECTOR_MODE_P (mode00))
3426 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3427 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3429 else
3430 n_elts00 = 1;
3432 if (VECTOR_MODE_P (mode01))
3434 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3435 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3437 else
3438 n_elts01 = 1;
3440 gcc_assert (n_elts == n_elts00 + n_elts01);
3442 /* Select correct operand of VEC_CONCAT
3443 and adjust selector. */
3444 if (elem < n_elts00)
3445 tmp_op = op00;
3446 else
3448 tmp_op = op01;
3449 elem -= n_elts00;
3452 else
3453 tmp_op = op0;
3455 vec = rtvec_alloc (1);
3456 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3458 tmp = gen_rtx_fmt_ee (code, mode,
3459 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3460 return tmp;
3462 if (GET_CODE (trueop0) == VEC_DUPLICATE
3463 && GET_MODE (XEXP (trueop0, 0)) == mode)
3464 return XEXP (trueop0, 0);
3466 else
3468 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3469 gcc_assert (GET_MODE_INNER (mode)
3470 == GET_MODE_INNER (GET_MODE (trueop0)));
3471 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3473 if (GET_CODE (trueop0) == CONST_VECTOR)
3475 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3476 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3477 rtvec v = rtvec_alloc (n_elts);
3478 unsigned int i;
3480 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3481 for (i = 0; i < n_elts; i++)
3483 rtx x = XVECEXP (trueop1, 0, i);
3485 gcc_assert (CONST_INT_P (x));
3486 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3487 INTVAL (x));
3490 return gen_rtx_CONST_VECTOR (mode, v);
3493 /* Recognize the identity. */
3494 if (GET_MODE (trueop0) == mode)
3496 bool maybe_ident = true;
3497 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3499 rtx j = XVECEXP (trueop1, 0, i);
3500 if (!CONST_INT_P (j) || INTVAL (j) != i)
3502 maybe_ident = false;
3503 break;
3506 if (maybe_ident)
3507 return trueop0;
3510 /* If we build {a,b} then permute it, build the result directly. */
3511 if (XVECLEN (trueop1, 0) == 2
3512 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3513 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3514 && GET_CODE (trueop0) == VEC_CONCAT
3515 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3516 && GET_MODE (XEXP (trueop0, 0)) == mode
3517 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3518 && GET_MODE (XEXP (trueop0, 1)) == mode)
3520 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3521 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3522 rtx subop0, subop1;
3524 gcc_assert (i0 < 4 && i1 < 4);
3525 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3526 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3528 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3531 if (XVECLEN (trueop1, 0) == 2
3532 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3533 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3534 && GET_CODE (trueop0) == VEC_CONCAT
3535 && GET_MODE (trueop0) == mode)
3537 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3538 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3539 rtx subop0, subop1;
3541 gcc_assert (i0 < 2 && i1 < 2);
3542 subop0 = XEXP (trueop0, i0);
3543 subop1 = XEXP (trueop0, i1);
3545 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3549 if (XVECLEN (trueop1, 0) == 1
3550 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3551 && GET_CODE (trueop0) == VEC_CONCAT)
3553 rtx vec = trueop0;
3554 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3556 /* Try to find the element in the VEC_CONCAT. */
3557 while (GET_MODE (vec) != mode
3558 && GET_CODE (vec) == VEC_CONCAT)
3560 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3561 if (offset < vec_size)
3562 vec = XEXP (vec, 0);
3563 else
3565 offset -= vec_size;
3566 vec = XEXP (vec, 1);
3568 vec = avoid_constant_pool_reference (vec);
3571 if (GET_MODE (vec) == mode)
3572 return vec;
3575 /* If we select elements in a vec_merge that all come from the same
3576 operand, select from that operand directly. */
3577 if (GET_CODE (op0) == VEC_MERGE)
3579 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3580 if (CONST_INT_P (trueop02))
3582 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3583 bool all_operand0 = true;
3584 bool all_operand1 = true;
3585 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3587 rtx j = XVECEXP (trueop1, 0, i);
3588 if (sel & (1 << UINTVAL (j)))
3589 all_operand1 = false;
3590 else
3591 all_operand0 = false;
3593 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3594 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3595 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3596 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3600 return 0;
3601 case VEC_CONCAT:
3603 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3604 ? GET_MODE (trueop0)
3605 : GET_MODE_INNER (mode));
3606 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3607 ? GET_MODE (trueop1)
3608 : GET_MODE_INNER (mode));
3610 gcc_assert (VECTOR_MODE_P (mode));
3611 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3612 == GET_MODE_SIZE (mode));
3614 if (VECTOR_MODE_P (op0_mode))
3615 gcc_assert (GET_MODE_INNER (mode)
3616 == GET_MODE_INNER (op0_mode));
3617 else
3618 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3620 if (VECTOR_MODE_P (op1_mode))
3621 gcc_assert (GET_MODE_INNER (mode)
3622 == GET_MODE_INNER (op1_mode));
3623 else
3624 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3626 if ((GET_CODE (trueop0) == CONST_VECTOR
3627 || CONST_SCALAR_INT_P (trueop0)
3628 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3629 && (GET_CODE (trueop1) == CONST_VECTOR
3630 || CONST_SCALAR_INT_P (trueop1)
3631 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3633 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3634 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3635 rtvec v = rtvec_alloc (n_elts);
3636 unsigned int i;
3637 unsigned in_n_elts = 1;
3639 if (VECTOR_MODE_P (op0_mode))
3640 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3641 for (i = 0; i < n_elts; i++)
3643 if (i < in_n_elts)
3645 if (!VECTOR_MODE_P (op0_mode))
3646 RTVEC_ELT (v, i) = trueop0;
3647 else
3648 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3650 else
3652 if (!VECTOR_MODE_P (op1_mode))
3653 RTVEC_ELT (v, i) = trueop1;
3654 else
3655 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3656 i - in_n_elts);
3660 return gen_rtx_CONST_VECTOR (mode, v);
3663 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3664 Restrict the transformation to avoid generating a VEC_SELECT with a
3665 mode unrelated to its operand. */
3666 if (GET_CODE (trueop0) == VEC_SELECT
3667 && GET_CODE (trueop1) == VEC_SELECT
3668 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3669 && GET_MODE (XEXP (trueop0, 0)) == mode)
3671 rtx par0 = XEXP (trueop0, 1);
3672 rtx par1 = XEXP (trueop1, 1);
3673 int len0 = XVECLEN (par0, 0);
3674 int len1 = XVECLEN (par1, 0);
3675 rtvec vec = rtvec_alloc (len0 + len1);
3676 for (int i = 0; i < len0; i++)
3677 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3678 for (int i = 0; i < len1; i++)
3679 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3680 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3681 gen_rtx_PARALLEL (VOIDmode, vec));
3684 return 0;
3686 default:
3687 gcc_unreachable ();
3690 return 0;
3694 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3695 rtx op0, rtx op1)
3697 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3698 HOST_WIDE_INT val;
3699 unsigned int width = GET_MODE_PRECISION (mode);
3701 if (VECTOR_MODE_P (mode)
3702 && code != VEC_CONCAT
3703 && GET_CODE (op0) == CONST_VECTOR
3704 && GET_CODE (op1) == CONST_VECTOR)
3706 unsigned n_elts = GET_MODE_NUNITS (mode);
3707 enum machine_mode op0mode = GET_MODE (op0);
3708 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3709 enum machine_mode op1mode = GET_MODE (op1);
3710 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3711 rtvec v = rtvec_alloc (n_elts);
3712 unsigned int i;
3714 gcc_assert (op0_n_elts == n_elts);
3715 gcc_assert (op1_n_elts == n_elts);
3716 for (i = 0; i < n_elts; i++)
3718 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3719 CONST_VECTOR_ELT (op0, i),
3720 CONST_VECTOR_ELT (op1, i));
3721 if (!x)
3722 return 0;
3723 RTVEC_ELT (v, i) = x;
3726 return gen_rtx_CONST_VECTOR (mode, v);
3729 if (VECTOR_MODE_P (mode)
3730 && code == VEC_CONCAT
3731 && (CONST_SCALAR_INT_P (op0)
3732 || GET_CODE (op0) == CONST_FIXED
3733 || CONST_DOUBLE_AS_FLOAT_P (op0))
3734 && (CONST_SCALAR_INT_P (op1)
3735 || CONST_DOUBLE_AS_FLOAT_P (op1)
3736 || GET_CODE (op1) == CONST_FIXED))
3738 unsigned n_elts = GET_MODE_NUNITS (mode);
3739 rtvec v = rtvec_alloc (n_elts);
3741 gcc_assert (n_elts >= 2);
3742 if (n_elts == 2)
3744 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3745 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3747 RTVEC_ELT (v, 0) = op0;
3748 RTVEC_ELT (v, 1) = op1;
3750 else
3752 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3753 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3754 unsigned i;
3756 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3757 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3758 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3760 for (i = 0; i < op0_n_elts; ++i)
3761 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3762 for (i = 0; i < op1_n_elts; ++i)
3763 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3766 return gen_rtx_CONST_VECTOR (mode, v);
3769 if (SCALAR_FLOAT_MODE_P (mode)
3770 && CONST_DOUBLE_AS_FLOAT_P (op0)
3771 && CONST_DOUBLE_AS_FLOAT_P (op1)
3772 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3774 if (code == AND
3775 || code == IOR
3776 || code == XOR)
3778 long tmp0[4];
3779 long tmp1[4];
3780 REAL_VALUE_TYPE r;
3781 int i;
3783 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3784 GET_MODE (op0));
3785 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3786 GET_MODE (op1));
3787 for (i = 0; i < 4; i++)
3789 switch (code)
3791 case AND:
3792 tmp0[i] &= tmp1[i];
3793 break;
3794 case IOR:
3795 tmp0[i] |= tmp1[i];
3796 break;
3797 case XOR:
3798 tmp0[i] ^= tmp1[i];
3799 break;
3800 default:
3801 gcc_unreachable ();
3804 real_from_target (&r, tmp0, mode);
3805 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3807 else
3809 REAL_VALUE_TYPE f0, f1, value, result;
3810 bool inexact;
3812 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3813 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3814 real_convert (&f0, mode, &f0);
3815 real_convert (&f1, mode, &f1);
3817 if (HONOR_SNANS (mode)
3818 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3819 return 0;
3821 if (code == DIV
3822 && REAL_VALUES_EQUAL (f1, dconst0)
3823 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3824 return 0;
3826 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3827 && flag_trapping_math
3828 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3830 int s0 = REAL_VALUE_NEGATIVE (f0);
3831 int s1 = REAL_VALUE_NEGATIVE (f1);
3833 switch (code)
3835 case PLUS:
3836 /* Inf + -Inf = NaN plus exception. */
3837 if (s0 != s1)
3838 return 0;
3839 break;
3840 case MINUS:
3841 /* Inf - Inf = NaN plus exception. */
3842 if (s0 == s1)
3843 return 0;
3844 break;
3845 case DIV:
3846 /* Inf / Inf = NaN plus exception. */
3847 return 0;
3848 default:
3849 break;
3853 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3854 && flag_trapping_math
3855 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3856 || (REAL_VALUE_ISINF (f1)
3857 && REAL_VALUES_EQUAL (f0, dconst0))))
3858 /* Inf * 0 = NaN plus exception. */
3859 return 0;
3861 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3862 &f0, &f1);
3863 real_convert (&result, mode, &value);
3865 /* Don't constant fold this floating point operation if
3866 the result has overflowed and flag_trapping_math is set. */
3868 if (flag_trapping_math
3869 && MODE_HAS_INFINITIES (mode)
3870 && REAL_VALUE_ISINF (result)
3871 && !REAL_VALUE_ISINF (f0)
3872 && !REAL_VALUE_ISINF (f1))
3873 /* Overflow plus exception. */
3874 return 0;
3876 /* Don't constant fold this floating point operation if the
3877 result may depend upon the run-time rounding mode and
3878 flag_rounding_math is set, or if GCC's software emulation
3879 is unable to accurately represent the result. */
3881 if ((flag_rounding_math
3882 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3883 && (inexact || !real_identical (&result, &value)))
3884 return NULL_RTX;
3886 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3890 /* We can fold some multi-word operations. */
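/* For example, on a host where HOST_BITS_PER_WIDE_INT is 32, adding two
   DImode constants is performed below in double_int arithmetic and the
   result is returned through immed_double_int_const.  */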
3891 if (GET_MODE_CLASS (mode) == MODE_INT
3892 && width == HOST_BITS_PER_DOUBLE_INT
3893 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3894 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3896 double_int o0, o1, res, tmp;
3897 bool overflow;
3899 o0 = rtx_to_double_int (op0);
3900 o1 = rtx_to_double_int (op1);
3902 switch (code)
3904 case MINUS:
3905 /* A - B == A + (-B). */
3906 o1 = -o1;
3908 /* Fall through.... */
3910 case PLUS:
3911 res = o0 + o1;
3912 break;
3914 case MULT:
3915 res = o0 * o1;
3916 break;
3918 case DIV:
3919 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3920 &tmp, &overflow);
3921 if (overflow)
3922 return 0;
3923 break;
3925 case MOD:
3926 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3927 &res, &overflow);
3928 if (overflow)
3929 return 0;
3930 break;
3932 case UDIV:
3933 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3934 &tmp, &overflow);
3935 if (overflow)
3936 return 0;
3937 break;
3939 case UMOD:
3940 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3941 &res, &overflow);
3942 if (overflow)
3943 return 0;
3944 break;
3946 case AND:
3947 res = o0 & o1;
3948 break;
3950 case IOR:
3951 res = o0 | o1;
3952 break;
3954 case XOR:
3955 res = o0 ^ o1;
3956 break;
3958 case SMIN:
3959 res = o0.smin (o1);
3960 break;
3962 case SMAX:
3963 res = o0.smax (o1);
3964 break;
3966 case UMIN:
3967 res = o0.umin (o1);
3968 break;
3970 case UMAX:
3971 res = o0.umax (o1);
3972 break;
3974 case LSHIFTRT: case ASHIFTRT:
3975 case ASHIFT:
3976 case ROTATE: case ROTATERT:
3978 unsigned HOST_WIDE_INT cnt;
3980 if (SHIFT_COUNT_TRUNCATED)
3982 o1.high = 0;
3983 o1.low &= GET_MODE_PRECISION (mode) - 1;
3986 if (!o1.fits_uhwi ()
3987 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3988 return 0;
3990 cnt = o1.to_uhwi ();
3991 unsigned short prec = GET_MODE_PRECISION (mode);
3993 if (code == LSHIFTRT || code == ASHIFTRT)
3994 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3995 else if (code == ASHIFT)
3996 res = o0.alshift (cnt, prec);
3997 else if (code == ROTATE)
3998 res = o0.lrotate (cnt, prec);
3999 else /* code == ROTATERT */
4000 res = o0.rrotate (cnt, prec);
4002 break;
4004 default:
4005 return 0;
4008 return immed_double_int_const (res, mode);
4011 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4012 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4014 /* Get the integer argument values in two forms:
4015 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
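/* For instance, if MODE is QImode and the operand value is -1 (all bits
   set), ARG0 becomes 0xff while ARG0S stays -1, so the signed and unsigned
   cases below can each use the appropriate form.  */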
4017 arg0 = INTVAL (op0);
4018 arg1 = INTVAL (op1);
4020 if (width < HOST_BITS_PER_WIDE_INT)
4022 arg0 &= GET_MODE_MASK (mode);
4023 arg1 &= GET_MODE_MASK (mode);
4025 arg0s = arg0;
4026 if (val_signbit_known_set_p (mode, arg0s))
4027 arg0s |= ~GET_MODE_MASK (mode);
4029 arg1s = arg1;
4030 if (val_signbit_known_set_p (mode, arg1s))
4031 arg1s |= ~GET_MODE_MASK (mode);
4033 else
4035 arg0s = arg0;
4036 arg1s = arg1;
4039 /* Compute the value of the arithmetic. */
4041 switch (code)
4043 case PLUS:
4044 val = arg0s + arg1s;
4045 break;
4047 case MINUS:
4048 val = arg0s - arg1s;
4049 break;
4051 case MULT:
4052 val = arg0s * arg1s;
4053 break;
4055 case DIV:
4056 if (arg1s == 0
4057 || ((unsigned HOST_WIDE_INT) arg0s
4058 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4059 && arg1s == -1))
4060 return 0;
4061 val = arg0s / arg1s;
4062 break;
4064 case MOD:
4065 if (arg1s == 0
4066 || ((unsigned HOST_WIDE_INT) arg0s
4067 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4068 && arg1s == -1))
4069 return 0;
4070 val = arg0s % arg1s;
4071 break;
4073 case UDIV:
4074 if (arg1 == 0
4075 || ((unsigned HOST_WIDE_INT) arg0s
4076 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4077 && arg1s == -1))
4078 return 0;
4079 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4080 break;
4082 case UMOD:
4083 if (arg1 == 0
4084 || ((unsigned HOST_WIDE_INT) arg0s
4085 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4086 && arg1s == -1))
4087 return 0;
4088 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4089 break;
4091 case AND:
4092 val = arg0 & arg1;
4093 break;
4095 case IOR:
4096 val = arg0 | arg1;
4097 break;
4099 case XOR:
4100 val = arg0 ^ arg1;
4101 break;
4103 case LSHIFTRT:
4104 case ASHIFT:
4105 case ASHIFTRT:
4106 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4107 the value is in range. We can't return any old value for
4108 out-of-range arguments because either the middle-end (via
4109 shift_truncation_mask) or the back-end might be relying on
4110 target-specific knowledge. Nor can we rely on
4111 shift_truncation_mask, since the shift might not be part of an
4112 ashlM3, lshrM3 or ashrM3 instruction. */
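/* As an illustration, for a 32-bit mode with SHIFT_COUNT_TRUNCATED,
   a shift count of 33 is reduced to 1 below; without it, the
   out-of-range count makes us refuse to fold the shift at all.  */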
4113 if (SHIFT_COUNT_TRUNCATED)
4114 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4115 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4116 return 0;
4118 val = (code == ASHIFT
4119 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4120 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4122 /* Sign-extend the result for arithmetic right shifts. */
4123 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4124 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4125 break;
4127 case ROTATERT:
4128 if (arg1 < 0)
4129 return 0;
4131 arg1 %= width;
4132 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4133 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4134 break;
4136 case ROTATE:
4137 if (arg1 < 0)
4138 return 0;
4140 arg1 %= width;
4141 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4142 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4143 break;
4145 case COMPARE:
4146 /* Do nothing here. */
4147 return 0;
4149 case SMIN:
4150 val = arg0s <= arg1s ? arg0s : arg1s;
4151 break;
4153 case UMIN:
4154 val = ((unsigned HOST_WIDE_INT) arg0
4155 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4156 break;
4158 case SMAX:
4159 val = arg0s > arg1s ? arg0s : arg1s;
4160 break;
4162 case UMAX:
4163 val = ((unsigned HOST_WIDE_INT) arg0
4164 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4165 break;
4167 case SS_PLUS:
4168 case US_PLUS:
4169 case SS_MINUS:
4170 case US_MINUS:
4171 case SS_MULT:
4172 case US_MULT:
4173 case SS_DIV:
4174 case US_DIV:
4175 case SS_ASHIFT:
4176 case US_ASHIFT:
4177 /* ??? There are simplifications that can be done. */
4178 return 0;
4180 default:
4181 gcc_unreachable ();
4184 return gen_int_mode (val, mode);
4187 return NULL_RTX;
4192 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4193 PLUS or MINUS.
4195 Rather than test for specific cases, we do this by a brute-force method
4196 and do all possible simplifications until no more changes occur. Then
4197 we rebuild the operation. */
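/* As a worked example, simplifying (minus a (minus b c)) starts with the
   operands recorded as {a, +} and {(minus b c), -}; the expansion loop
   splits the inner MINUS into {b, -} and {c, +}, and the operation is
   then rebuilt from those three terms, an equivalent of a - b + c.  */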
4199 struct simplify_plus_minus_op_data
4201 rtx op;
4202 short neg;
4205 static bool
4206 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4208 int result;
4210 result = (commutative_operand_precedence (y)
4211 - commutative_operand_precedence (x));
4212 if (result)
4213 return result > 0;
4215 /* Group together equal REGs to do more simplification. */
4216 if (REG_P (x) && REG_P (y))
4217 return REGNO (x) > REGNO (y);
4218 else
4219 return false;
4222 static rtx
4223 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4224 rtx op1)
4226 struct simplify_plus_minus_op_data ops[8];
4227 rtx result, tem;
4228 int n_ops = 2, input_ops = 2;
4229 int changed, n_constants = 0, canonicalized = 0;
4230 int i, j;
4232 memset (ops, 0, sizeof ops);
4234 /* Set up the two operands and then expand them until nothing has been
4235 changed. If we run out of room in our array, give up; this should
4236 almost never happen. */
4238 ops[0].op = op0;
4239 ops[0].neg = 0;
4240 ops[1].op = op1;
4241 ops[1].neg = (code == MINUS);
4245 changed = 0;
4247 for (i = 0; i < n_ops; i++)
4249 rtx this_op = ops[i].op;
4250 int this_neg = ops[i].neg;
4251 enum rtx_code this_code = GET_CODE (this_op);
4253 switch (this_code)
4255 case PLUS:
4256 case MINUS:
4257 if (n_ops == 7)
4258 return NULL_RTX;
4260 ops[n_ops].op = XEXP (this_op, 1);
4261 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4262 n_ops++;
4264 ops[i].op = XEXP (this_op, 0);
4265 input_ops++;
4266 changed = 1;
4267 canonicalized |= this_neg;
4268 break;
4270 case NEG:
4271 ops[i].op = XEXP (this_op, 0);
4272 ops[i].neg = ! this_neg;
4273 changed = 1;
4274 canonicalized = 1;
4275 break;
4277 case CONST:
4278 if (n_ops < 7
4279 && GET_CODE (XEXP (this_op, 0)) == PLUS
4280 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4281 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4283 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4284 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4285 ops[n_ops].neg = this_neg;
4286 n_ops++;
4287 changed = 1;
4288 canonicalized = 1;
4290 break;
4292 case NOT:
4293 /* ~a -> (-a - 1) */
4294 if (n_ops != 7)
4296 ops[n_ops].op = CONSTM1_RTX (mode);
4297 ops[n_ops++].neg = this_neg;
4298 ops[i].op = XEXP (this_op, 0);
4299 ops[i].neg = !this_neg;
4300 changed = 1;
4301 canonicalized = 1;
4303 break;
4305 case CONST_INT:
4306 n_constants++;
4307 if (this_neg)
4309 ops[i].op = neg_const_int (mode, this_op);
4310 ops[i].neg = 0;
4311 changed = 1;
4312 canonicalized = 1;
4314 break;
4316 default:
4317 break;
4321 while (changed);
4323 if (n_constants > 1)
4324 canonicalized = 1;
4326 gcc_assert (n_ops >= 2);
4328 /* If we only have two operands, we can avoid the loops. */
4329 if (n_ops == 2)
4331 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4332 rtx lhs, rhs;
4334 /* Get the two operands. Be careful with the order, especially for
4335 the cases where code == MINUS. */
4336 if (ops[0].neg && ops[1].neg)
4338 lhs = gen_rtx_NEG (mode, ops[0].op);
4339 rhs = ops[1].op;
4341 else if (ops[0].neg)
4343 lhs = ops[1].op;
4344 rhs = ops[0].op;
4346 else
4348 lhs = ops[0].op;
4349 rhs = ops[1].op;
4352 return simplify_const_binary_operation (code, mode, lhs, rhs);
4355 /* Now simplify each pair of operands until nothing changes. */
4358 /* Insertion sort is good enough for an eight-element array. */
4359 for (i = 1; i < n_ops; i++)
4361 struct simplify_plus_minus_op_data save;
4362 j = i - 1;
4363 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4364 continue;
4366 canonicalized = 1;
4367 save = ops[i];
4369 ops[j + 1] = ops[j];
4370 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4371 ops[j + 1] = save;
4374 changed = 0;
4375 for (i = n_ops - 1; i > 0; i--)
4376 for (j = i - 1; j >= 0; j--)
4378 rtx lhs = ops[j].op, rhs = ops[i].op;
4379 int lneg = ops[j].neg, rneg = ops[i].neg;
4381 if (lhs != 0 && rhs != 0)
4383 enum rtx_code ncode = PLUS;
4385 if (lneg != rneg)
4387 ncode = MINUS;
4388 if (lneg)
4389 tem = lhs, lhs = rhs, rhs = tem;
4391 else if (swap_commutative_operands_p (lhs, rhs))
4392 tem = lhs, lhs = rhs, rhs = tem;
4394 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4395 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4397 rtx tem_lhs, tem_rhs;
4399 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4400 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4401 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4403 if (tem && !CONSTANT_P (tem))
4404 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4406 else
4407 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4409 /* Reject "simplifications" that just wrap the two
4410 arguments in a CONST. Failure to do so can result
4411 in infinite recursion with simplify_binary_operation
4412 when it calls us to simplify CONST operations. */
4413 if (tem
4414 && ! (GET_CODE (tem) == CONST
4415 && GET_CODE (XEXP (tem, 0)) == ncode
4416 && XEXP (XEXP (tem, 0), 0) == lhs
4417 && XEXP (XEXP (tem, 0), 1) == rhs))
4419 lneg &= rneg;
4420 if (GET_CODE (tem) == NEG)
4421 tem = XEXP (tem, 0), lneg = !lneg;
4422 if (CONST_INT_P (tem) && lneg)
4423 tem = neg_const_int (mode, tem), lneg = 0;
4425 ops[i].op = tem;
4426 ops[i].neg = lneg;
4427 ops[j].op = NULL_RTX;
4428 changed = 1;
4429 canonicalized = 1;
4434 /* If nothing changed, fail. */
4435 if (!canonicalized)
4436 return NULL_RTX;
4438 /* Pack all the operands to the lower-numbered entries. */
4439 for (i = 0, j = 0; j < n_ops; j++)
4440 if (ops[j].op)
4442 ops[i] = ops[j];
4443 i++;
4445 n_ops = i;
4447 while (changed);
4449 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4450 if (n_ops == 2
4451 && CONST_INT_P (ops[1].op)
4452 && CONSTANT_P (ops[0].op)
4453 && ops[0].neg)
4454 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4456 /* We suppressed creation of trivial CONST expressions in the
4457 combination loop to avoid recursion. Create one manually now.
4458 The combination loop should have ensured that there is exactly
4459 one CONST_INT, and the sort will have ensured that it is last
4460 in the array and that any other constant will be next-to-last. */
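/* For instance, once the loop has reduced the operands to
   {(symbol_ref X), +} and {(const_int 12), +}, plus_constant combines
   them here into a single wrapped constant of the form
   (const (plus (symbol_ref X) (const_int 12))).  */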
4462 if (n_ops > 1
4463 && CONST_INT_P (ops[n_ops - 1].op)
4464 && CONSTANT_P (ops[n_ops - 2].op))
4466 rtx value = ops[n_ops - 1].op;
4467 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4468 value = neg_const_int (mode, value);
4469 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4470 INTVAL (value));
4471 n_ops--;
4474 /* Put a non-negated operand first, if possible. */
4476 for (i = 0; i < n_ops && ops[i].neg; i++)
4477 continue;
4478 if (i == n_ops)
4479 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4480 else if (i != 0)
4482 tem = ops[0].op;
4483 ops[0] = ops[i];
4484 ops[i].op = tem;
4485 ops[i].neg = 1;
4488 /* Now make the result by performing the requested operations. */
4489 result = ops[0].op;
4490 for (i = 1; i < n_ops; i++)
4491 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4492 mode, result, ops[i].op);
4494 return result;
4497 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4498 static bool
4499 plus_minus_operand_p (const_rtx x)
4501 return GET_CODE (x) == PLUS
4502 || GET_CODE (x) == MINUS
4503 || (GET_CODE (x) == CONST
4504 && GET_CODE (XEXP (x, 0)) == PLUS
4505 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4506 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4509 /* Like simplify_binary_operation except used for relational operators.
4510 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4511 not also be VOIDmode.
4513 CMP_MODE specifies the mode in which the comparison is done, so it is
4514 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4515 the operands or, if both are VOIDmode, the operands are compared in
4516 "infinite precision". */
4518 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4519 enum machine_mode cmp_mode, rtx op0, rtx op1)
4521 rtx tem, trueop0, trueop1;
4523 if (cmp_mode == VOIDmode)
4524 cmp_mode = GET_MODE (op0);
4525 if (cmp_mode == VOIDmode)
4526 cmp_mode = GET_MODE (op1);
4528 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4529 if (tem)
4531 if (SCALAR_FLOAT_MODE_P (mode))
4533 if (tem == const0_rtx)
4534 return CONST0_RTX (mode);
4535 #ifdef FLOAT_STORE_FLAG_VALUE
4537 REAL_VALUE_TYPE val;
4538 val = FLOAT_STORE_FLAG_VALUE (mode);
4539 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4541 #else
4542 return NULL_RTX;
4543 #endif
4545 if (VECTOR_MODE_P (mode))
4547 if (tem == const0_rtx)
4548 return CONST0_RTX (mode);
4549 #ifdef VECTOR_STORE_FLAG_VALUE
4551 int i, units;
4552 rtvec v;
4554 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4555 if (val == NULL_RTX)
4556 return NULL_RTX;
4557 if (val == const1_rtx)
4558 return CONST1_RTX (mode);
4560 units = GET_MODE_NUNITS (mode);
4561 v = rtvec_alloc (units);
4562 for (i = 0; i < units; i++)
4563 RTVEC_ELT (v, i) = val;
4564 return gen_rtx_raw_CONST_VECTOR (mode, v);
4566 #else
4567 return NULL_RTX;
4568 #endif
4571 return tem;
4574 /* For the following tests, ensure const0_rtx is op1. */
4575 if (swap_commutative_operands_p (op0, op1)
4576 || (op0 == const0_rtx && op1 != const0_rtx))
4577 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4579 /* If op0 is a compare, extract the comparison arguments from it. */
4580 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4581 return simplify_gen_relational (code, mode, VOIDmode,
4582 XEXP (op0, 0), XEXP (op0, 1));
4584 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4585 || CC0_P (op0))
4586 return NULL_RTX;
4588 trueop0 = avoid_constant_pool_reference (op0);
4589 trueop1 = avoid_constant_pool_reference (op1);
4590 return simplify_relational_operation_1 (code, mode, cmp_mode,
4591 trueop0, trueop1);
4594 /* This part of simplify_relational_operation is only used when CMP_MODE
4595 is not in class MODE_CC (i.e. it is a real comparison).
4597 MODE is the mode of the result, while CMP_MODE specifies the mode in
4598 which the comparison is done, so it is the mode of the operands. */
4600 static rtx
4601 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4602 enum machine_mode cmp_mode, rtx op0, rtx op1)
4604 enum rtx_code op0code = GET_CODE (op0);
4606 if (op1 == const0_rtx && COMPARISON_P (op0))
4608 /* If op0 is a comparison, extract the comparison arguments
4609 from it. */
4610 if (code == NE)
4612 if (GET_MODE (op0) == mode)
4613 return simplify_rtx (op0);
4614 else
4615 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4616 XEXP (op0, 0), XEXP (op0, 1));
4618 else if (code == EQ)
4620 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4621 if (new_code != UNKNOWN)
4622 return simplify_gen_relational (new_code, mode, VOIDmode,
4623 XEXP (op0, 0), XEXP (op0, 1));
4627 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4628 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4629 if ((code == LTU || code == GEU)
4630 && GET_CODE (op0) == PLUS
4631 && CONST_INT_P (XEXP (op0, 1))
4632 && (rtx_equal_p (op1, XEXP (op0, 0))
4633 || rtx_equal_p (op1, XEXP (op0, 1)))
4634 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4635 && XEXP (op0, 1) != const0_rtx)
4637 rtx new_cmp
4638 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4639 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4640 cmp_mode, XEXP (op0, 0), new_cmp);
4643 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4644 if ((code == LTU || code == GEU)
4645 && GET_CODE (op0) == PLUS
4646 && rtx_equal_p (op1, XEXP (op0, 1))
4647 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4648 && !rtx_equal_p (op1, XEXP (op0, 0)))
4649 return simplify_gen_relational (code, mode, cmp_mode, op0,
4650 copy_rtx (XEXP (op0, 0)));
4652 if (op1 == const0_rtx)
4654 /* Canonicalize (GTU x 0) as (NE x 0). */
4655 if (code == GTU)
4656 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4657 /* Canonicalize (LEU x 0) as (EQ x 0). */
4658 if (code == LEU)
4659 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4661 else if (op1 == const1_rtx)
4663 switch (code)
4665 case GE:
4666 /* Canonicalize (GE x 1) as (GT x 0). */
4667 return simplify_gen_relational (GT, mode, cmp_mode,
4668 op0, const0_rtx);
4669 case GEU:
4670 /* Canonicalize (GEU x 1) as (NE x 0). */
4671 return simplify_gen_relational (NE, mode, cmp_mode,
4672 op0, const0_rtx);
4673 case LT:
4674 /* Canonicalize (LT x 1) as (LE x 0). */
4675 return simplify_gen_relational (LE, mode, cmp_mode,
4676 op0, const0_rtx);
4677 case LTU:
4678 /* Canonicalize (LTU x 1) as (EQ x 0). */
4679 return simplify_gen_relational (EQ, mode, cmp_mode,
4680 op0, const0_rtx);
4681 default:
4682 break;
4685 else if (op1 == constm1_rtx)
4687 /* Canonicalize (LE x -1) as (LT x 0). */
4688 if (code == LE)
4689 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4690 /* Canonicalize (GT x -1) as (GE x 0). */
4691 if (code == GT)
4692 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4695 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4696 if ((code == EQ || code == NE)
4697 && (op0code == PLUS || op0code == MINUS)
4698 && CONSTANT_P (op1)
4699 && CONSTANT_P (XEXP (op0, 1))
4700 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4702 rtx x = XEXP (op0, 0);
4703 rtx c = XEXP (op0, 1);
4704 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4705 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4707 /* Detect an infinite recursive condition, where we oscillate at this
4708 simplification case between:
4709 A + B == C <---> C - B == A,
4710 where A, B, and C are all constants with non-simplifiable expressions,
4711 usually SYMBOL_REFs. */
4712 if (GET_CODE (tem) == invcode
4713 && CONSTANT_P (x)
4714 && rtx_equal_p (c, XEXP (tem, 1)))
4715 return NULL_RTX;
4717 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4720 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4721 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4722 if (code == NE
4723 && op1 == const0_rtx
4724 && GET_MODE_CLASS (mode) == MODE_INT
4725 && cmp_mode != VOIDmode
4726 /* ??? Work-around BImode bugs in the ia64 backend. */
4727 && mode != BImode
4728 && cmp_mode != BImode
4729 && nonzero_bits (op0, cmp_mode) == 1
4730 && STORE_FLAG_VALUE == 1)
4731 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4732 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4733 : lowpart_subreg (mode, op0, cmp_mode);
4735 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4736 if ((code == EQ || code == NE)
4737 && op1 == const0_rtx
4738 && op0code == XOR)
4739 return simplify_gen_relational (code, mode, cmp_mode,
4740 XEXP (op0, 0), XEXP (op0, 1));
4742 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4743 if ((code == EQ || code == NE)
4744 && op0code == XOR
4745 && rtx_equal_p (XEXP (op0, 0), op1)
4746 && !side_effects_p (XEXP (op0, 0)))
4747 return simplify_gen_relational (code, mode, cmp_mode,
4748 XEXP (op0, 1), const0_rtx);
4750 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4751 if ((code == EQ || code == NE)
4752 && op0code == XOR
4753 && rtx_equal_p (XEXP (op0, 1), op1)
4754 && !side_effects_p (XEXP (op0, 1)))
4755 return simplify_gen_relational (code, mode, cmp_mode,
4756 XEXP (op0, 0), const0_rtx);
4758 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4759 if ((code == EQ || code == NE)
4760 && op0code == XOR
4761 && CONST_SCALAR_INT_P (op1)
4762 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4763 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4764 simplify_gen_binary (XOR, cmp_mode,
4765 XEXP (op0, 1), op1));
4767 if (op0code == POPCOUNT && op1 == const0_rtx)
4768 switch (code)
4770 case EQ:
4771 case LE:
4772 case LEU:
4773 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4774 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4775 XEXP (op0, 0), const0_rtx);
4777 case NE:
4778 case GT:
4779 case GTU:
4780 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4781 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4782 XEXP (op0, 0), const0_rtx);
4784 default:
4785 break;
4788 return NULL_RTX;
4791 enum
4793 CMP_EQ = 1,
4794 CMP_LT = 2,
4795 CMP_GT = 4,
4796 CMP_LTU = 8,
4797 CMP_GTU = 16
4801 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4802 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4803 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4804 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4805 For floating-point comparisons, assume that the operands were ordered. */
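/* As an illustration, comparing -1 with 1 gives KNOWN_RESULTS of
   CMP_LT | CMP_GTU: the signed LT then folds to const_true_rtx, while
   the unsigned LTU folds to const0_rtx.  */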
4807 static rtx
4808 comparison_result (enum rtx_code code, int known_results)
4810 switch (code)
4812 case EQ:
4813 case UNEQ:
4814 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4815 case NE:
4816 case LTGT:
4817 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4819 case LT:
4820 case UNLT:
4821 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4822 case GE:
4823 case UNGE:
4824 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4826 case GT:
4827 case UNGT:
4828 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4829 case LE:
4830 case UNLE:
4831 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4833 case LTU:
4834 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4835 case GEU:
4836 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4838 case GTU:
4839 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4840 case LEU:
4841 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4843 case ORDERED:
4844 return const_true_rtx;
4845 case UNORDERED:
4846 return const0_rtx;
4847 default:
4848 gcc_unreachable ();
4852 /* Check if the given comparison (done in the given MODE) is actually a
4853 tautology or a contradiction.
4854 If no simplification is possible, this function returns zero.
4855 Otherwise, it returns either const_true_rtx or const0_rtx. */
4858 simplify_const_relational_operation (enum rtx_code code,
4859 enum machine_mode mode,
4860 rtx op0, rtx op1)
4862 rtx tem;
4863 rtx trueop0;
4864 rtx trueop1;
4866 gcc_assert (mode != VOIDmode
4867 || (GET_MODE (op0) == VOIDmode
4868 && GET_MODE (op1) == VOIDmode));
4870 /* If op0 is a compare, extract the comparison arguments from it. */
4871 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4873 op1 = XEXP (op0, 1);
4874 op0 = XEXP (op0, 0);
4876 if (GET_MODE (op0) != VOIDmode)
4877 mode = GET_MODE (op0);
4878 else if (GET_MODE (op1) != VOIDmode)
4879 mode = GET_MODE (op1);
4880 else
4881 return 0;
4884 /* We can't simplify MODE_CC values since we don't know what the
4885 actual comparison is. */
4886 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4887 return 0;
4889 /* Make sure the constant is second. */
4890 if (swap_commutative_operands_p (op0, op1))
4892 tem = op0, op0 = op1, op1 = tem;
4893 code = swap_condition (code);
4896 trueop0 = avoid_constant_pool_reference (op0);
4897 trueop1 = avoid_constant_pool_reference (op1);
4899 /* For integer comparisons of A and B maybe we can simplify A - B and can
4900 then simplify a comparison of that with zero. If A and B are both either
4901 a register or a CONST_INT, this can't help; testing for these cases will
4902 prevent infinite recursion here and speed things up.
4904 We can only do this for EQ and NE comparisons, as otherwise we may
4905 lose or introduce overflow that we cannot disregard as undefined, since
4906 we do not know the signedness of the operation on either the left or
4907 the right hand side of the comparison. */
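/* For example, (eq (plus x (const_int 3)) (plus x (const_int 5))) has a
   simplifiable difference of (const_int -2), so the recursive call below
   folds the whole comparison to const0_rtx.  */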
4909 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4910 && (code == EQ || code == NE)
4911 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4912 && (REG_P (op1) || CONST_INT_P (trueop1)))
4913 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4914 /* We cannot do this if tem is a nonzero address. */
4915 && ! nonzero_address_p (tem))
4916 return simplify_const_relational_operation (signed_condition (code),
4917 mode, tem, const0_rtx);
4919 if (! HONOR_NANS (mode) && code == ORDERED)
4920 return const_true_rtx;
4922 if (! HONOR_NANS (mode) && code == UNORDERED)
4923 return const0_rtx;
4925 /* For modes without NaNs, if the two operands are equal, we know the
4926 result except if they have side-effects. Even with NaNs we know
4927 the result of unordered comparisons and, if signaling NaNs are
4928 irrelevant, also the result of LT/GT/LTGT. */
4929 if ((! HONOR_NANS (GET_MODE (trueop0))
4930 || code == UNEQ || code == UNLE || code == UNGE
4931 || ((code == LT || code == GT || code == LTGT)
4932 && ! HONOR_SNANS (GET_MODE (trueop0))))
4933 && rtx_equal_p (trueop0, trueop1)
4934 && ! side_effects_p (trueop0))
4935 return comparison_result (code, CMP_EQ);
4937 /* If the operands are floating-point constants, see if we can fold
4938 the result. */
4939 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4940 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4941 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4943 REAL_VALUE_TYPE d0, d1;
4945 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4946 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4948 /* Comparisons are unordered iff at least one of the values is NaN. */
4949 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4950 switch (code)
4952 case UNEQ:
4953 case UNLT:
4954 case UNGT:
4955 case UNLE:
4956 case UNGE:
4957 case NE:
4958 case UNORDERED:
4959 return const_true_rtx;
4960 case EQ:
4961 case LT:
4962 case GT:
4963 case LE:
4964 case GE:
4965 case LTGT:
4966 case ORDERED:
4967 return const0_rtx;
4968 default:
4969 return 0;
4972 return comparison_result (code,
4973 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4974 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4977 /* Otherwise, see if the operands are both integers. */
4978 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4979 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4980 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4982 int width = GET_MODE_PRECISION (mode);
4983 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4984 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4986 /* Get the two words comprising each integer constant. */
4987 if (CONST_DOUBLE_AS_INT_P (trueop0))
4989 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4990 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4992 else
4994 l0u = l0s = INTVAL (trueop0);
4995 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4998 if (CONST_DOUBLE_AS_INT_P (trueop1))
5000 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5001 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5003 else
5005 l1u = l1s = INTVAL (trueop1);
5006 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5009 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5010 we have to sign or zero-extend the values. */
5011 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5013 l0u &= GET_MODE_MASK (mode);
5014 l1u &= GET_MODE_MASK (mode);
5016 if (val_signbit_known_set_p (mode, l0s))
5017 l0s |= ~GET_MODE_MASK (mode);
5019 if (val_signbit_known_set_p (mode, l1s))
5020 l1s |= ~GET_MODE_MASK (mode);
5022 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5023 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5025 if (h0u == h1u && l0u == l1u)
5026 return comparison_result (code, CMP_EQ);
5027 else
5029 int cr;
5030 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5031 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5032 return comparison_result (code, cr);
5036 /* Optimize comparisons with upper and lower bounds. */
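/* For instance, if nonzero_bits shows that op0 can only be in [0, 15],
   then (gtu op0 (const_int 15)) is known to be false and
   (leu op0 (const_int 15)) is known to be true.  */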
5037 if (HWI_COMPUTABLE_MODE_P (mode)
5038 && CONST_INT_P (trueop1))
5040 int sign;
5041 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5042 HOST_WIDE_INT val = INTVAL (trueop1);
5043 HOST_WIDE_INT mmin, mmax;
5045 if (code == GEU
5046 || code == LEU
5047 || code == GTU
5048 || code == LTU)
5049 sign = 0;
5050 else
5051 sign = 1;
5053 /* Get a reduced range if the sign bit is zero. */
5054 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5056 mmin = 0;
5057 mmax = nonzero;
5059 else
5061 rtx mmin_rtx, mmax_rtx;
5062 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5064 mmin = INTVAL (mmin_rtx);
5065 mmax = INTVAL (mmax_rtx);
5066 if (sign)
5068 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5070 mmin >>= (sign_copies - 1);
5071 mmax >>= (sign_copies - 1);
5075 switch (code)
5077 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5078 case GEU:
5079 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5080 return const_true_rtx;
5081 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5082 return const0_rtx;
5083 break;
5084 case GE:
5085 if (val <= mmin)
5086 return const_true_rtx;
5087 if (val > mmax)
5088 return const0_rtx;
5089 break;
5091 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5092 case LEU:
5093 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5094 return const_true_rtx;
5095 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5096 return const0_rtx;
5097 break;
5098 case LE:
5099 if (val >= mmax)
5100 return const_true_rtx;
5101 if (val < mmin)
5102 return const0_rtx;
5103 break;
5105 case EQ:
5106 /* x == y is always false for y out of range. */
5107 if (val < mmin || val > mmax)
5108 return const0_rtx;
5109 break;
5111 /* x > y is always false for y >= mmax, always true for y < mmin. */
5112 case GTU:
5113 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5114 return const0_rtx;
5115 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5116 return const_true_rtx;
5117 break;
5118 case GT:
5119 if (val >= mmax)
5120 return const0_rtx;
5121 if (val < mmin)
5122 return const_true_rtx;
5123 break;
5125 /* x < y is always false for y <= mmin, always true for y > mmax. */
5126 case LTU:
5127 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5128 return const0_rtx;
5129 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5130 return const_true_rtx;
5131 break;
5132 case LT:
5133 if (val <= mmin)
5134 return const0_rtx;
5135 if (val > mmax)
5136 return const_true_rtx;
5137 break;
5139 case NE:
5140 /* x != y is always true for y out of range. */
5141 if (val < mmin || val > mmax)
5142 return const_true_rtx;
5143 break;
5145 default:
5146 break;
5150 /* Optimize integer comparisons with zero. */
5151 if (trueop1 == const0_rtx)
5153 /* Some addresses are known to be nonzero. We don't know
5154 their sign, but equality comparisons are known. */
5155 if (nonzero_address_p (trueop0))
5157 if (code == EQ || code == LEU)
5158 return const0_rtx;
5159 if (code == NE || code == GTU)
5160 return const_true_rtx;
5163 /* See if the first operand is an IOR with a constant. If so, we
5164 may be able to determine the result of this comparison. */
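/* For example, (gtu (ior x (const_int 4)) (const_int 0)) is always true,
   because the IOR with a nonzero constant is itself nonzero.  */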
5165 if (GET_CODE (op0) == IOR)
5167 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5168 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5170 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5171 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5172 && (UINTVAL (inner_const)
5173 & ((unsigned HOST_WIDE_INT) 1
5174 << sign_bitnum)));
5176 switch (code)
5178 case EQ:
5179 case LEU:
5180 return const0_rtx;
5181 case NE:
5182 case GTU:
5183 return const_true_rtx;
5184 case LT:
5185 case LE:
5186 if (has_sign)
5187 return const_true_rtx;
5188 break;
5189 case GT:
5190 case GE:
5191 if (has_sign)
5192 return const0_rtx;
5193 break;
5194 default:
5195 break;
5201 /* Optimize comparison of ABS with zero. */
5202 if (trueop1 == CONST0_RTX (mode)
5203 && (GET_CODE (trueop0) == ABS
5204 || (GET_CODE (trueop0) == FLOAT_EXTEND
5205 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5207 switch (code)
5209 case LT:
5210 /* Optimize abs(x) < 0.0. */
5211 if (!HONOR_SNANS (mode)
5212 && (!INTEGRAL_MODE_P (mode)
5213 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5215 if (INTEGRAL_MODE_P (mode)
5216 && (issue_strict_overflow_warning
5217 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5218 warning (OPT_Wstrict_overflow,
5219 ("assuming signed overflow does not occur when "
5220 "assuming abs (x) < 0 is false"));
5221 return const0_rtx;
5223 break;
5225 case GE:
5226 /* Optimize abs(x) >= 0.0. */
5227 if (!HONOR_NANS (mode)
5228 && (!INTEGRAL_MODE_P (mode)
5229 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5231 if (INTEGRAL_MODE_P (mode)
5232 && (issue_strict_overflow_warning
5233 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5234 warning (OPT_Wstrict_overflow,
5235 ("assuming signed overflow does not occur when "
5236 "assuming abs (x) >= 0 is true"));
5237 return const_true_rtx;
5239 break;
5241 case UNGE:
5242 /* Optimize ! (abs(x) < 0.0). */
5243 return const_true_rtx;
5245 default:
5246 break;
5250 return 0;
5253 /* Simplify CODE, an operation with result mode MODE and three operands,
5254 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5255 a constant. Return 0 if no simplification is possible. */
5258 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5259 enum machine_mode op0_mode, rtx op0, rtx op1,
5260 rtx op2)
5262 unsigned int width = GET_MODE_PRECISION (mode);
5263 bool any_change = false;
5264 rtx tem, trueop2;
5266 /* VOIDmode means "infinite" precision. */
5267 if (width == 0)
5268 width = HOST_BITS_PER_WIDE_INT;
5270 switch (code)
5272 case FMA:
5273 /* Simplify negations around the multiplication. */
5274 /* -a * -b + c => a * b + c. */
5275 if (GET_CODE (op0) == NEG)
5277 tem = simplify_unary_operation (NEG, mode, op1, mode);
5278 if (tem)
5279 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5281 else if (GET_CODE (op1) == NEG)
5283 tem = simplify_unary_operation (NEG, mode, op0, mode);
5284 if (tem)
5285 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5288 /* Canonicalize the two multiplication operands. */
5289 /* a * -b + c => -b * a + c. */
5290 if (swap_commutative_operands_p (op0, op1))
5291 tem = op0, op0 = op1, op1 = tem, any_change = true;
5293 if (any_change)
5294 return gen_rtx_FMA (mode, op0, op1, op2);
5295 return NULL_RTX;
5297 case SIGN_EXTRACT:
5298 case ZERO_EXTRACT:
5299 if (CONST_INT_P (op0)
5300 && CONST_INT_P (op1)
5301 && CONST_INT_P (op2)
5302 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5303 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5305 /* Extracting a bit-field from a constant */
5306 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5307 HOST_WIDE_INT op1val = INTVAL (op1);
5308 HOST_WIDE_INT op2val = INTVAL (op2);
5309 if (BITS_BIG_ENDIAN)
5310 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5311 else
5312 val >>= op2val;
5314 if (HOST_BITS_PER_WIDE_INT != op1val)
5316 /* First zero-extend. */
5317 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5318 /* If desired, propagate sign bit. */
5319 if (code == SIGN_EXTRACT
5320 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5321 != 0)
5322 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5325 return gen_int_mode (val, mode);
5327 break;
5329 case IF_THEN_ELSE:
5330 if (CONST_INT_P (op0))
5331 return op0 != const0_rtx ? op1 : op2;
5333 /* Convert c ? a : a into "a". */
5334 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5335 return op1;
5337 /* Convert a != b ? a : b into "a". */
5338 if (GET_CODE (op0) == NE
5339 && ! side_effects_p (op0)
5340 && ! HONOR_NANS (mode)
5341 && ! HONOR_SIGNED_ZEROS (mode)
5342 && ((rtx_equal_p (XEXP (op0, 0), op1)
5343 && rtx_equal_p (XEXP (op0, 1), op2))
5344 || (rtx_equal_p (XEXP (op0, 0), op2)
5345 && rtx_equal_p (XEXP (op0, 1), op1))))
5346 return op1;
5348 /* Convert a == b ? a : b into "b". */
5349 if (GET_CODE (op0) == EQ
5350 && ! side_effects_p (op0)
5351 && ! HONOR_NANS (mode)
5352 && ! HONOR_SIGNED_ZEROS (mode)
5353 && ((rtx_equal_p (XEXP (op0, 0), op1)
5354 && rtx_equal_p (XEXP (op0, 1), op2))
5355 || (rtx_equal_p (XEXP (op0, 0), op2)
5356 && rtx_equal_p (XEXP (op0, 1), op1))))
5357 return op2;
5359 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5361 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5362 ? GET_MODE (XEXP (op0, 1))
5363 : GET_MODE (XEXP (op0, 0)));
5364 rtx temp;
5366 /* Look for happy constants in op1 and op2. */
5367 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5369 HOST_WIDE_INT t = INTVAL (op1);
5370 HOST_WIDE_INT f = INTVAL (op2);
5372 if (t == STORE_FLAG_VALUE && f == 0)
5373 code = GET_CODE (op0);
5374 else if (t == 0 && f == STORE_FLAG_VALUE)
5376 enum rtx_code tmp;
5377 tmp = reversed_comparison_code (op0, NULL_RTX);
5378 if (tmp == UNKNOWN)
5379 break;
5380 code = tmp;
5382 else
5383 break;
5385 return simplify_gen_relational (code, mode, cmp_mode,
5386 XEXP (op0, 0), XEXP (op0, 1));
5389 if (cmp_mode == VOIDmode)
5390 cmp_mode = op0_mode;
5391 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5392 cmp_mode, XEXP (op0, 0),
5393 XEXP (op0, 1));
5395 /* See if any simplifications were possible. */
5396 if (temp)
5398 if (CONST_INT_P (temp))
5399 return temp == const0_rtx ? op2 : op1;
5400 else if (temp)
5401 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5404 break;
5406 case VEC_MERGE:
5407 gcc_assert (GET_MODE (op0) == mode);
5408 gcc_assert (GET_MODE (op1) == mode);
5409 gcc_assert (VECTOR_MODE_P (mode));
5410 trueop2 = avoid_constant_pool_reference (op2);
5411 if (CONST_INT_P (trueop2))
5413 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5414 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5415 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5416 unsigned HOST_WIDE_INT mask;
5417 if (n_elts == HOST_BITS_PER_WIDE_INT)
5418 mask = -1;
5419 else
5420 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5422 if (!(sel & mask) && !side_effects_p (op0))
5423 return op1;
5424 if ((sel & mask) == mask && !side_effects_p (op1))
5425 return op0;
5427 rtx trueop0 = avoid_constant_pool_reference (op0);
5428 rtx trueop1 = avoid_constant_pool_reference (op1);
5429 if (GET_CODE (trueop0) == CONST_VECTOR
5430 && GET_CODE (trueop1) == CONST_VECTOR)
5432 rtvec v = rtvec_alloc (n_elts);
5433 unsigned int i;
5435 for (i = 0; i < n_elts; i++)
5436 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5437 ? CONST_VECTOR_ELT (trueop0, i)
5438 : CONST_VECTOR_ELT (trueop1, i));
5439 return gen_rtx_CONST_VECTOR (mode, v);
5442 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5443 if no element from a appears in the result. */
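/* For example, with four elements,
     (vec_merge (vec_merge a b (const_int 3)) c (const_int 12))
   uses no element of a, since the selectors 3 and 12 share no bits,
   and so becomes (vec_merge b c (const_int 12)).  */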
5444 if (GET_CODE (op0) == VEC_MERGE)
5446 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5447 if (CONST_INT_P (tem))
5449 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5450 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5451 return simplify_gen_ternary (code, mode, mode,
5452 XEXP (op0, 1), op1, op2);
5453 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5454 return simplify_gen_ternary (code, mode, mode,
5455 XEXP (op0, 0), op1, op2);
5458 if (GET_CODE (op1) == VEC_MERGE)
5460 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5461 if (CONST_INT_P (tem))
5463 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5464 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5465 return simplify_gen_ternary (code, mode, mode,
5466 op0, XEXP (op1, 1), op2);
5467 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5468 return simplify_gen_ternary (code, mode, mode,
5469 op0, XEXP (op1, 0), op2);
5474 if (rtx_equal_p (op0, op1)
5475 && !side_effects_p (op2) && !side_effects_p (op1))
5476 return op0;
5478 break;
5480 default:
5481 gcc_unreachable ();
5484 return 0;
5487 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5488 or CONST_VECTOR,
5489 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5491 Works by unpacking OP into a collection of 8-bit values
5492 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5493 and then repacking them again for OUTERMODE. */
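/* For example, an SImode subreg at byte 4 of a DImode constant is handled
   by spreading the 64-bit value over eight such byte values, selecting the
   four requested ones, and rebuilding an SImode CONST_INT from them.  */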
5495 static rtx
5496 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5497 enum machine_mode innermode, unsigned int byte)
5499 /* We support up to 512-bit values (for V8DFmode). */
5500 enum {
5501 max_bitsize = 512,
5502 value_bit = 8,
5503 value_mask = (1 << value_bit) - 1
5505 unsigned char value[max_bitsize / value_bit];
5506 int value_start;
5507 int i;
5508 int elem;
5510 int num_elem;
5511 rtx * elems;
5512 int elem_bitsize;
5513 rtx result_s;
5514 rtvec result_v = NULL;
5515 enum mode_class outer_class;
5516 enum machine_mode outer_submode;
5518 /* Some ports misuse CCmode. */
5519 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5520 return op;
5522 /* We have no way to represent a complex constant at the rtl level. */
5523 if (COMPLEX_MODE_P (outermode))
5524 return NULL_RTX;
5526 /* Unpack the value. */
5528 if (GET_CODE (op) == CONST_VECTOR)
5530 num_elem = CONST_VECTOR_NUNITS (op);
5531 elems = &CONST_VECTOR_ELT (op, 0);
5532 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5534 else
5536 num_elem = 1;
5537 elems = &op;
5538 elem_bitsize = max_bitsize;
5540 /* If this asserts, it is too complicated; reducing value_bit may help. */
5541 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5542 /* I don't know how to handle endianness of sub-units. */
5543 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5545 for (elem = 0; elem < num_elem; elem++)
5547 unsigned char * vp;
5548 rtx el = elems[elem];
5550 /* Vectors are kept in target memory order. (This is probably
5551 a mistake.) */
5553 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5554 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5555 / BITS_PER_UNIT);
5556 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5557 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5558 unsigned bytele = (subword_byte % UNITS_PER_WORD
5559 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5560 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5563 switch (GET_CODE (el))
5565 case CONST_INT:
5566 for (i = 0;
5567 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5568 i += value_bit)
5569 *vp++ = INTVAL (el) >> i;
5570 /* CONST_INTs are always logically sign-extended. */
5571 for (; i < elem_bitsize; i += value_bit)
5572 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5573 break;
5575 case CONST_DOUBLE:
5576 if (GET_MODE (el) == VOIDmode)
5578 unsigned char extend = 0;
5579 /* If this triggers, someone should have generated a
5580 CONST_INT instead. */
5581 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5583 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5584 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5585 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5587 *vp++
5588 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5589 i += value_bit;
5592 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5593 extend = -1;
5594 for (; i < elem_bitsize; i += value_bit)
5595 *vp++ = extend;
5597 else
5599 long tmp[max_bitsize / 32];
5600 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5602 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5603 gcc_assert (bitsize <= elem_bitsize);
5604 gcc_assert (bitsize % value_bit == 0);
5606 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5607 GET_MODE (el));
5609 /* real_to_target produces its result in words affected by
5610 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5611 and use WORDS_BIG_ENDIAN instead; see the documentation
5612 of SUBREG in rtl.texi. */
5613 for (i = 0; i < bitsize; i += value_bit)
5615 int ibase;
5616 if (WORDS_BIG_ENDIAN)
5617 ibase = bitsize - 1 - i;
5618 else
5619 ibase = i;
5620 *vp++ = tmp[ibase / 32] >> i % 32;
5623 /* It shouldn't matter what's done here, so fill it with
5624 zero. */
5625 for (; i < elem_bitsize; i += value_bit)
5626 *vp++ = 0;
5628 break;
5630 case CONST_FIXED:
5631 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5633 for (i = 0; i < elem_bitsize; i += value_bit)
5634 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5636 else
5638 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5639 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5640 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5641 i += value_bit)
5642 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5643 >> (i - HOST_BITS_PER_WIDE_INT);
5644 for (; i < elem_bitsize; i += value_bit)
5645 *vp++ = 0;
5647 break;
5649 default:
5650 gcc_unreachable ();
5654 /* Now, pick the right byte to start with. */
5655 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5656 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5657 will already have offset 0. */
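/* E.g. an SImode subreg at byte 4 of a DImode value on a big-endian
   target: bytes 4-7 hold the least significant word in memory order,
   so the offset is renumbered to 0 here.  */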
5658 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5660 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5661 - byte);
5662 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5663 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5664 byte = (subword_byte % UNITS_PER_WORD
5665 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5668 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5669 so if it's become negative it will instead be very large.) */
5670 gcc_assert (byte < GET_MODE_SIZE (innermode));
5672 /* Convert from bytes to chunks of size value_bit. */
5673 value_start = byte * (BITS_PER_UNIT / value_bit);
5675 /* Re-pack the value. */
5677 if (VECTOR_MODE_P (outermode))
5679 num_elem = GET_MODE_NUNITS (outermode);
5680 result_v = rtvec_alloc (num_elem);
5681 elems = &RTVEC_ELT (result_v, 0);
5682 outer_submode = GET_MODE_INNER (outermode);
5684 else
5686 num_elem = 1;
5687 elems = &result_s;
5688 outer_submode = outermode;
5691 outer_class = GET_MODE_CLASS (outer_submode);
5692 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5694 gcc_assert (elem_bitsize % value_bit == 0);
5695 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5697 for (elem = 0; elem < num_elem; elem++)
5699 unsigned char *vp;
5701 /* Vectors are stored in target memory order. (This is probably
5702 a mistake.) */
5704 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5705 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5706 / BITS_PER_UNIT);
5707 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5708 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5709 unsigned bytele = (subword_byte % UNITS_PER_WORD
5710 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5711 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5714 switch (outer_class)
5716 case MODE_INT:
5717 case MODE_PARTIAL_INT:
5719 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5721 for (i = 0;
5722 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5723 i += value_bit)
5724 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5725 for (; i < elem_bitsize; i += value_bit)
5726 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5727 << (i - HOST_BITS_PER_WIDE_INT);
5729 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5730 know why. */
5731 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5732 elems[elem] = gen_int_mode (lo, outer_submode);
5733 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5734 elems[elem] = immed_double_const (lo, hi, outer_submode);
5735 else
5736 return NULL_RTX;
5738 break;
5740 case MODE_FLOAT:
5741 case MODE_DECIMAL_FLOAT:
5743 REAL_VALUE_TYPE r;
5744 long tmp[max_bitsize / 32];
5746 /* real_from_target wants its input in words affected by
5747 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5748 and use WORDS_BIG_ENDIAN instead; see the documentation
5749 of SUBREG in rtl.texi. */
5750 for (i = 0; i < max_bitsize / 32; i++)
5751 tmp[i] = 0;
5752 for (i = 0; i < elem_bitsize; i += value_bit)
5754 int ibase;
5755 if (WORDS_BIG_ENDIAN)
5756 ibase = elem_bitsize - 1 - i;
5757 else
5758 ibase = i;
5759 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5762 real_from_target (&r, tmp, outer_submode);
5763 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5765 break;
5767 case MODE_FRACT:
5768 case MODE_UFRACT:
5769 case MODE_ACCUM:
5770 case MODE_UACCUM:
5772 FIXED_VALUE_TYPE f;
5773 f.data.low = 0;
5774 f.data.high = 0;
5775 f.mode = outer_submode;
5777 for (i = 0;
5778 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5779 i += value_bit)
5780 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5781 for (; i < elem_bitsize; i += value_bit)
5782 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5783 << (i - HOST_BITS_PER_WIDE_INT));
5785 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5787 break;
5789 default:
5790 gcc_unreachable ();
5793 if (VECTOR_MODE_P (outermode))
5794 return gen_rtx_CONST_VECTOR (outermode, result_v);
5795 else
5796 return result_s;
5799 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5800 Return 0 if no simplifications are possible. */
5802 simplify_subreg (enum machine_mode outermode, rtx op,
5803 enum machine_mode innermode, unsigned int byte)
5805 /* Little bit of sanity checking. */
5806 gcc_assert (innermode != VOIDmode);
5807 gcc_assert (outermode != VOIDmode);
5808 gcc_assert (innermode != BLKmode);
5809 gcc_assert (outermode != BLKmode);
5811 gcc_assert (GET_MODE (op) == innermode
5812 || GET_MODE (op) == VOIDmode);
5814 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5815 return NULL_RTX;
5817 if (byte >= GET_MODE_SIZE (innermode))
5818 return NULL_RTX;
5820 if (outermode == innermode && !byte)
5821 return op;
5823 if (CONST_SCALAR_INT_P (op)
5824 || CONST_DOUBLE_AS_FLOAT_P (op)
5825 || GET_CODE (op) == CONST_FIXED
5826 || GET_CODE (op) == CONST_VECTOR)
5827 return simplify_immed_subreg (outermode, op, innermode, byte);
5829 /* Changing mode twice with SUBREG => just change it once,
5830 or not at all if changing back to op's starting mode. */
5831 if (GET_CODE (op) == SUBREG)
5833 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5834 int final_offset = byte + SUBREG_BYTE (op);
5835 rtx newx;
5837 if (outermode == innermostmode
5838 && byte == 0 && SUBREG_BYTE (op) == 0)
5839 return SUBREG_REG (op);
5841 /* The SUBREG_BYTE represents offset, as if the value were stored
5842 in memory. Irritating exception is paradoxical subreg, where
5843 we define SUBREG_BYTE to be 0. On big endian machines, this
5844 value should be negative. For a moment, undo this exception. */
5845 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5847 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5848 if (WORDS_BIG_ENDIAN)
5849 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5850 if (BYTES_BIG_ENDIAN)
5851 final_offset += difference % UNITS_PER_WORD;
5853 if (SUBREG_BYTE (op) == 0
5854 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5856 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5857 if (WORDS_BIG_ENDIAN)
5858 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5859 if (BYTES_BIG_ENDIAN)
5860 final_offset += difference % UNITS_PER_WORD;
5863 /* See whether the resulting subreg will be paradoxical. */
5864 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5866 /* In non-paradoxical subregs we can't handle negative offsets. */
5867 if (final_offset < 0)
5868 return NULL_RTX;
5869 /* Bail out if the resulting subreg would be invalid. */
5870 if (final_offset % GET_MODE_SIZE (outermode)
5871 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5872 return NULL_RTX;
5874 else
5876 int offset = 0;
5877 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5879 /* For a paradoxical subreg, see whether we are still looking at the
5880 lower part.  If so, our SUBREG_BYTE will be 0. */
5881 if (WORDS_BIG_ENDIAN)
5882 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5883 if (BYTES_BIG_ENDIAN)
5884 offset += difference % UNITS_PER_WORD;
5885 if (offset == final_offset)
5886 final_offset = 0;
5887 else
5888 return NULL_RTX;
5891 /* Recurse for further possible simplifications. */
5892 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5893 final_offset);
5894 if (newx)
5895 return newx;
5896 if (validate_subreg (outermode, innermostmode,
5897 SUBREG_REG (op), final_offset))
5899 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5900 if (SUBREG_PROMOTED_VAR_P (op)
5901 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5902 && GET_MODE_CLASS (outermode) == MODE_INT
5903 && IN_RANGE (GET_MODE_SIZE (outermode),
5904 GET_MODE_SIZE (innermode),
5905 GET_MODE_SIZE (innermostmode))
5906 && subreg_lowpart_p (newx))
5908 SUBREG_PROMOTED_VAR_P (newx) = 1;
5909 SUBREG_PROMOTED_UNSIGNED_SET
5910 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5912 return newx;
5914 return NULL_RTX;
5917 /* SUBREG of a hard register => just change the register number
5918 and/or mode. If the hard register is not valid in that mode,
5919 suppress this simplification. If the hard register is the stack,
5920 frame, or argument pointer, leave this as a SUBREG. */
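 /* Hedged example (hypothetical 32-bit little-endian target on which
    DImode occupies hard registers 0 and 1):
      (subreg:SI (reg:DI 0) 4) -> (reg:SI 1)
    whereas a SUBREG of the stack, frame or argument pointer is kept
    as a SUBREG, as noted above.  */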
5922 if (REG_P (op) && HARD_REGISTER_P (op))
5924 unsigned int regno, final_regno;
5926 regno = REGNO (op);
5927 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5928 if (HARD_REGISTER_NUM_P (final_regno))
5930 rtx x;
5931 int final_offset = byte;
5933 /* Adjust offset for paradoxical subregs. */
5934 if (byte == 0
5935 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5937 int difference = (GET_MODE_SIZE (innermode)
5938 - GET_MODE_SIZE (outermode));
5939 if (WORDS_BIG_ENDIAN)
5940 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5941 if (BYTES_BIG_ENDIAN)
5942 final_offset += difference % UNITS_PER_WORD;
5945 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5947 /* Propagate the original regno.  We don't have any way to specify
5948 an offset inside the original regno, so do this only for the lowpart.
5949 The information is used only by alias analysis, which cannot
5950 grok partial registers anyway. */
5952 if (subreg_lowpart_offset (outermode, innermode) == byte)
5953 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5954 return x;
5958 /* If we have a SUBREG of a register that we are replacing and we are
5959 replacing it with a MEM, make a new MEM and try replacing the
5960 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5961 or if we would be widening it. */
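 /* Roughly (illustrative only; adjust_address_nv also rewrites the
    MEM's attributes): on a 32-bit target
      (subreg:QI (mem:SI (reg:SI 100)) 3)
    becomes
      (mem:QI (plus:SI (reg:SI 100) (const_int 3)))
    because SUBREG_BYTE is already an as-if-in-memory offset.  */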
5963 if (MEM_P (op)
5964 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5965 /* Allow splitting of volatile memory references in case we don't
5966 have an instruction to move the whole thing. */
5967 && (! MEM_VOLATILE_P (op)
5968 || ! have_insn_for (SET, innermode))
5969 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5970 return adjust_address_nv (op, outermode, byte);
5972 /* Handle complex values represented as a CONCAT
5973 of the real and imaginary parts. */
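 /* For example (illustrative only, assuming a 4-byte SFmode):
      (subreg:SF (concat:SC (reg:SF 100) (reg:SF 101)) 4)
    selects the imaginary part and simplifies to (reg:SF 101).  */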
5974 if (GET_CODE (op) == CONCAT)
5976 unsigned int part_size, final_offset;
5977 rtx part, res;
5979 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5980 if (byte < part_size)
5982 part = XEXP (op, 0);
5983 final_offset = byte;
5985 else
5987 part = XEXP (op, 1);
5988 final_offset = byte - part_size;
5991 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5992 return NULL_RTX;
5994 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5995 if (res)
5996 return res;
5997 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5998 return gen_rtx_SUBREG (outermode, part, final_offset);
5999 return NULL_RTX;
6002 /* A SUBREG resulting from a zero extension may fold to zero if
6003 it extracts higher bits than the ZERO_EXTEND's source provides. */
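 /* For instance, on a little-endian target
      (subreg:SI (zero_extend:DI (reg:SI 100)) 4)
    reads only the zero-filled upper half and folds to (const_int 0).  */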
6004 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6006 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6007 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6008 return CONST0_RTX (outermode);
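 /* A lowpart SUBREG of a scalar integer value behaves like a TRUNCATE,
    so give simplify_truncation a chance; e.g. the lowpart SImode subreg
    of (zero_extend:DI (reg:SI 100)) can come back as (reg:SI 100).  */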
6011 if (SCALAR_INT_MODE_P (outermode)
6012 && SCALAR_INT_MODE_P (innermode)
6013 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6014 && byte == subreg_lowpart_offset (outermode, innermode))
6016 rtx tem = simplify_truncation (outermode, op, innermode);
6017 if (tem)
6018 return tem;
6021 return NULL_RTX;
6024 /* Make a SUBREG operation, or an equivalent rtx if the SUBREG folds. */
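 /* That is: if simplify_subreg finds a simpler form, return it; otherwise
    wrap OP in a fresh SUBREG when validate_subreg allows it, and return
    NULL_RTX when neither is possible.  */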
6026 rtx
6027 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6028 enum machine_mode innermode, unsigned int byte)
6030 rtx newx;
6032 newx = simplify_subreg (outermode, op, innermode, byte);
6033 if (newx)
6034 return newx;
6036 if (GET_CODE (op) == SUBREG
6037 || GET_CODE (op) == CONCAT
6038 || GET_MODE (op) == VOIDmode)
6039 return NULL_RTX;
6041 if (validate_subreg (outermode, innermode, op, byte))
6042 return gen_rtx_SUBREG (outermode, op, byte);
6044 return NULL_RTX;
6047 /* Simplify X, an rtx expression.
6049 Return the simplified expression or NULL if no simplifications
6050 were possible.
6052 This is the preferred entry point into the simplification routines;
6053 however, we still allow passes to call the more specific routines.
6055 Right now GCC has three (yes, three) major bodies of RTL simplification
6056 code that need to be unified.
6058 1. fold_rtx in cse.c. This code uses various CSE specific
6059 information to aid in RTL simplification.
6061 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6062 it uses combine specific information to aid in RTL
6063 simplification.
6065 3. The routines in this file.
6068 Long term we want to only have one body of simplification code; to
6069 get to that state I recommend the following steps:
6071 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6072 which do not depend on pass-specific state into these routines.
6074 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6075 use this routine whenever possible.
6077 3. Allow for pass dependent state to be provided to these
6078 routines and add simplifications based on the pass dependent
6079 state. Remove code from cse.c & combine.c that becomes
6080 redundant/dead.
6082 It will take time, but ultimately the compiler will be easier to
6083 maintain and improve.  It's totally silly that when we add a
6084 simplification it needs to be added to 4 places (3 for RTL
6085 simplification and 1 for tree simplification). */
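 /* A hedged illustration (sketch only): given
      (plus:SI (reg:SI 100) (const_int 0))
    this entry point returns (reg:SI 100) via simplify_binary_operation,
    while an expression it cannot improve comes back as NULL.  */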
6087 rtx
6088 simplify_rtx (const_rtx x)
6090 const enum rtx_code code = GET_CODE (x);
6091 const enum machine_mode mode = GET_MODE (x);
6093 switch (GET_RTX_CLASS (code))
6095 case RTX_UNARY:
6096 return simplify_unary_operation (code, mode,
6097 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6098 case RTX_COMM_ARITH:
6099 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6100 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6102 /* Fall through.... */
6104 case RTX_BIN_ARITH:
6105 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6107 case RTX_TERNARY:
6108 case RTX_BITFIELD_OPS:
6109 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6110 XEXP (x, 0), XEXP (x, 1),
6111 XEXP (x, 2));
6113 case RTX_COMPARE:
6114 case RTX_COMM_COMPARE:
6115 return simplify_relational_operation (code, mode,
6116 ((GET_MODE (XEXP (x, 0))
6117 != VOIDmode)
6118 ? GET_MODE (XEXP (x, 0))
6119 : GET_MODE (XEXP (x, 1))),
6120 XEXP (x, 0),
6121 XEXP (x, 1));
6123 case RTX_EXTRA:
6124 if (code == SUBREG)
6125 return simplify_subreg (mode, SUBREG_REG (x),
6126 GET_MODE (SUBREG_REG (x)),
6127 SUBREG_BYTE (x));
6128 break;
6130 case RTX_OBJ:
6131 if (code == LO_SUM)
6133 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6134 if (GET_CODE (XEXP (x, 0)) == HIGH
6135 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6136 return XEXP (x, 1);
6138 break;
6140 default:
6141 break;
6143 return NULL;