1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
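/* For illustration (assuming a 64-bit HOST_WIDE_INT): HWI_SIGN_EXTEND
   of any LOW value whose sign bit is set, e.g. (HOST_WIDE_INT) -5,
   yields (HOST_WIDE_INT) -1, while HWI_SIGN_EXTEND (7) yields 0; i.e.
   it produces the high half that sign extending LOW would give. */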
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
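/* E.g. in QImode, neg_const_int of (const_int 5) is (const_int -5),
   while (const_int -128) wraps back to (const_int -128), because
   gen_int_mode truncates the negated value 0x80 to the mode's
   precision and re-sign-extends it. */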
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
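/* E.g. for 32-bit SImode this returns true exactly for
   (const_int -2147483648), i.e. the constant whose low 32 bits are
   0x80000000 after sign extension to HOST_WIDE_INT. */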
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
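/* E.g. simplify_gen_binary (PLUS, SImode, const1_rtx, <a reg>) does not
   fold, but the commutative swap above canonicalizes it to
   (plus:SI (reg:SI ...) (const_int 1)) rather than leaving the
   constant as the first operand. */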
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
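/* E.g. given a MEM whose address is a SYMBOL_REF into the constant
   pool (say the pool entry holding 1.0 in DFmode), this returns the
   pooled CONST_DOUBLE itself, so later folding can treat the load as
   a constant. */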
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 X, if it returns non-NULL, replace X with its return value and simplify the
410 result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
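/* E.g. applied to (plus:SI (reg:SI 100) (const_int 4)) with
   OLD_RTX = (reg:SI 100) and NEW_RTX = (const_int -4), the replacement
   plus resimplification folds the result all the way to (const_int 0). */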
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
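/* E.g. on a 32-bit little-endian target,
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32)))
   becomes (subreg:SI (reg:DI X) 4), i.e. a direct reference to the
   high word of X. */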
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
861 if (STORE_FLAG_VALUE == -1
862 && GET_CODE (op) == ASHIFTRT
863 && CONST_INT_P (XEXP (op, 1))
864 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
865 return simplify_gen_relational (GE, mode, VOIDmode,
866 XEXP (op, 0), const0_rtx);
869 if (GET_CODE (op) == SUBREG
870 && subreg_lowpart_p (op)
871 && (GET_MODE_SIZE (GET_MODE (op))
872 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
873 && GET_CODE (SUBREG_REG (op)) == ASHIFT
874 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
876 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
877 rtx x;
879 x = gen_rtx_ROTATE (inner_mode,
880 simplify_gen_unary (NOT, inner_mode, const1_rtx,
881 inner_mode),
882 XEXP (SUBREG_REG (op), 1));
883 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
884 if (temp)
885 return temp;
888 /* Apply De Morgan's laws to reduce number of patterns for machines
889 with negating logical insns (and-not, nand, etc.). If result has
890 only one NOT, put it first, since that is how the patterns are
891 coded. */
892 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
894 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
895 enum machine_mode op_mode;
897 op_mode = GET_MODE (in1);
898 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
900 op_mode = GET_MODE (in2);
901 if (op_mode == VOIDmode)
902 op_mode = mode;
903 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
905 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
907 rtx tem = in2;
908 in2 = in1; in1 = tem;
911 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
912 mode, in1, in2);
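/* E.g. (not (and:SI X Y)) becomes (ior:SI (not:SI X) (not:SI Y)), and
   (not (ior:SI X (const_int 12))) becomes
   (and:SI (not:SI X) (const_int -13)). */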
915 /* (not (bswap x)) -> (bswap (not x)). */
916 if (GET_CODE (op) == BSWAP)
918 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
919 return simplify_gen_unary (BSWAP, mode, x, mode);
921 break;
923 case NEG:
924 /* (neg (neg X)) == X. */
925 if (GET_CODE (op) == NEG)
926 return XEXP (op, 0);
928 /* (neg (plus X 1)) can become (not X). */
929 if (GET_CODE (op) == PLUS
930 && XEXP (op, 1) == const1_rtx)
931 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
933 /* Similarly, (neg (not X)) is (plus X 1). */
934 if (GET_CODE (op) == NOT)
935 return plus_constant (mode, XEXP (op, 0), 1);
937 /* (neg (minus X Y)) can become (minus Y X). This transformation
938 isn't safe for modes with signed zeros, since if X and Y are
939 both +0, (minus Y X) is the same as (minus X Y). If the
940 rounding mode is towards +infinity (or -infinity) then the two
941 expressions will be rounded differently. */
942 if (GET_CODE (op) == MINUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
947 if (GET_CODE (op) == PLUS
948 && !HONOR_SIGNED_ZEROS (mode)
949 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
951 /* (neg (plus A C)) is simplified to (minus -C A). */
952 if (CONST_SCALAR_INT_P (XEXP (op, 1))
953 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
955 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
956 if (temp)
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
960 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
961 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
962 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
965 /* (neg (mult A B)) becomes (mult A (neg B)).
966 This works even for floating-point values. */
967 if (GET_CODE (op) == MULT
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
971 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
974 /* NEG commutes with ASHIFT since it is multiplication. Only do
975 this if we can then eliminate the NEG (e.g., if the operand
976 is a constant). */
977 if (GET_CODE (op) == ASHIFT)
979 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
980 if (temp)
981 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
984 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
985 C is equal to the width of MODE minus 1. */
986 if (GET_CODE (op) == ASHIFTRT
987 && CONST_INT_P (XEXP (op, 1))
988 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
989 return simplify_gen_binary (LSHIFTRT, mode,
990 XEXP (op, 0), XEXP (op, 1));
992 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
993 C is equal to the width of MODE minus 1. */
994 if (GET_CODE (op) == LSHIFTRT
995 && CONST_INT_P (XEXP (op, 1))
996 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 return simplify_gen_binary (ASHIFTRT, mode,
998 XEXP (op, 0), XEXP (op, 1));
1000 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1001 if (GET_CODE (op) == XOR
1002 && XEXP (op, 1) == const1_rtx
1003 && nonzero_bits (XEXP (op, 0), mode) == 1)
1004 return plus_constant (mode, XEXP (op, 0), -1);
1006 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1007 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
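/* E.g. with STORE_FLAG_VALUE == 1 and 32-bit SImode,
   (neg:SI (lt:SI X (const_int 0))) becomes
   (ashiftrt:SI X (const_int 31)), i.e. 0 or -1 depending on the sign
   bit of X. */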
1008 if (GET_CODE (op) == LT
1009 && XEXP (op, 1) == const0_rtx
1010 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1012 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1013 int isize = GET_MODE_PRECISION (inner);
1014 if (STORE_FLAG_VALUE == 1)
1016 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1017 GEN_INT (isize - 1));
1018 if (mode == inner)
1019 return temp;
1020 if (GET_MODE_PRECISION (mode) > isize)
1021 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1022 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1024 else if (STORE_FLAG_VALUE == -1)
1026 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1027 GEN_INT (isize - 1));
1028 if (mode == inner)
1029 return temp;
1030 if (GET_MODE_PRECISION (mode) > isize)
1031 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1032 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1035 break;
1037 case TRUNCATE:
1038 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1039 with the umulXi3_highpart patterns. */
1040 if (GET_CODE (op) == LSHIFTRT
1041 && GET_CODE (XEXP (op, 0)) == MULT)
1042 break;
1044 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1046 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1048 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1049 if (temp)
1050 return temp;
1052 /* We can't handle truncation to a partial integer mode here
1053 because we don't know the real bitsize of the partial
1054 integer mode. */
1055 break;
1058 if (GET_MODE (op) != VOIDmode)
1060 temp = simplify_truncation (mode, op, GET_MODE (op));
1061 if (temp)
1062 return temp;
1065 /* If we know that the value is already truncated, we can
1066 replace the TRUNCATE with a SUBREG. */
1067 if (GET_MODE_NUNITS (mode) == 1
1068 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1069 || truncated_to_mode (mode, op)))
1071 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 if (temp)
1073 return temp;
1076 /* A truncate of a comparison can be replaced with a subreg if
1077 STORE_FLAG_VALUE permits. This is like the previous test,
1078 but it works even if the comparison is done in a mode larger
1079 than HOST_BITS_PER_WIDE_INT. */
1080 if (HWI_COMPUTABLE_MODE_P (mode)
1081 && COMPARISON_P (op)
1082 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1084 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1085 if (temp)
1086 return temp;
1089 /* A truncate of a memory is just loading the low part of the memory
1090 if we are not changing the meaning of the address. */
1091 if (GET_CODE (op) == MEM
1092 && !VECTOR_MODE_P (mode)
1093 && !MEM_VOLATILE_P (op)
1094 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 break;
1103 case FLOAT_TRUNCATE:
1104 if (DECIMAL_FLOAT_MODE_P (mode))
1105 break;
1107 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1108 if (GET_CODE (op) == FLOAT_EXTEND
1109 && GET_MODE (XEXP (op, 0)) == mode)
1110 return XEXP (op, 0);
1112 /* (float_truncate:SF (float_truncate:DF foo:XF))
1113 = (float_truncate:SF foo:XF).
1114 This may eliminate double rounding, so it is unsafe.
1116 (float_truncate:SF (float_extend:XF foo:DF))
1117 = (float_truncate:SF foo:DF).
1119 (float_truncate:DF (float_extend:XF foo:SF))
1120 = (float_extend:DF foo:SF). */
1121 if ((GET_CODE (op) == FLOAT_TRUNCATE
1122 && flag_unsafe_math_optimizations)
1123 || GET_CODE (op) == FLOAT_EXTEND)
1124 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1125 0)))
1126 > GET_MODE_SIZE (mode)
1127 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1128 mode,
1129 XEXP (op, 0), mode);
1131 /* (float_truncate (float x)) is (float x) */
1132 if (GET_CODE (op) == FLOAT
1133 && (flag_unsafe_math_optimizations
1134 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1135 && ((unsigned)significand_size (GET_MODE (op))
1136 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1137 - num_sign_bit_copies (XEXP (op, 0),
1138 GET_MODE (XEXP (op, 0))))))))
1139 return simplify_gen_unary (FLOAT, mode,
1140 XEXP (op, 0),
1141 GET_MODE (XEXP (op, 0)));
1143 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1144 (OP:SF foo:SF) if OP is NEG or ABS. */
1145 if ((GET_CODE (op) == ABS
1146 || GET_CODE (op) == NEG)
1147 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1149 return simplify_gen_unary (GET_CODE (op), mode,
1150 XEXP (XEXP (op, 0), 0), mode);
1152 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 is (float_truncate:SF x). */
1154 if (GET_CODE (op) == SUBREG
1155 && subreg_lowpart_p (op)
1156 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1157 return SUBREG_REG (op);
1158 break;
1160 case FLOAT_EXTEND:
1161 if (DECIMAL_FLOAT_MODE_P (mode))
1162 break;
1164 /* (float_extend (float_extend x)) is (float_extend x)
1166 (float_extend (float x)) is (float x) assuming that double
1167 rounding can't happen. */
1169 if (GET_CODE (op) == FLOAT_EXTEND
1170 || (GET_CODE (op) == FLOAT
1171 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1172 && ((unsigned)significand_size (GET_MODE (op))
1173 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1174 - num_sign_bit_copies (XEXP (op, 0),
1175 GET_MODE (XEXP (op, 0)))))))
1176 return simplify_gen_unary (GET_CODE (op), mode,
1177 XEXP (op, 0),
1178 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case ABS:
1183 /* (abs (neg <foo>)) -> (abs <foo>) */
1184 if (GET_CODE (op) == NEG)
1185 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1186 GET_MODE (XEXP (op, 0)));
1188 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1189 do nothing. */
1190 if (GET_MODE (op) == VOIDmode)
1191 break;
1193 /* If operand is something known to be positive, ignore the ABS. */
1194 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1195 || val_signbit_known_clear_p (GET_MODE (op),
1196 nonzero_bits (op, GET_MODE (op))))
1197 return op;
1199 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1200 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1201 return gen_rtx_NEG (mode, op);
1203 break;
1205 case FFS:
1206 /* (ffs (*_extend <X>)) = (ffs <X>) */
1207 if (GET_CODE (op) == SIGN_EXTEND
1208 || GET_CODE (op) == ZERO_EXTEND)
1209 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1210 GET_MODE (XEXP (op, 0)));
1211 break;
1213 case POPCOUNT:
1214 switch (GET_CODE (op))
1216 case BSWAP:
1217 case ZERO_EXTEND:
1218 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1219 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1222 case ROTATE:
1223 case ROTATERT:
1224 /* Rotations don't affect popcount. */
1225 if (!side_effects_p (XEXP (op, 1)))
1226 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1227 GET_MODE (XEXP (op, 0)));
1228 break;
1230 default:
1231 break;
1233 break;
1235 case PARITY:
1236 switch (GET_CODE (op))
1238 case NOT:
1239 case BSWAP:
1240 case ZERO_EXTEND:
1241 case SIGN_EXTEND:
1242 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1245 case ROTATE:
1246 case ROTATERT:
1247 /* Rotations don't affect parity. */
1248 if (!side_effects_p (XEXP (op, 1)))
1249 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1250 GET_MODE (XEXP (op, 0)));
1251 break;
1253 default:
1254 break;
1256 break;
1258 case BSWAP:
1259 /* (bswap (bswap x)) -> x. */
1260 if (GET_CODE (op) == BSWAP)
1261 return XEXP (op, 0);
1262 break;
1264 case FLOAT:
1265 /* (float (sign_extend <X>)) = (float <X>). */
1266 if (GET_CODE (op) == SIGN_EXTEND)
1267 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1269 break;
1271 case SIGN_EXTEND:
1272 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1273 becomes just the MINUS if its mode is MODE. This allows
1274 folding switch statements on machines using casesi (such as
1275 the VAX). */
1276 if (GET_CODE (op) == TRUNCATE
1277 && GET_MODE (XEXP (op, 0)) == mode
1278 && GET_CODE (XEXP (op, 0)) == MINUS
1279 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1280 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1281 return XEXP (op, 0);
1283 /* Extending a widening multiplication should be canonicalized to
1284 a wider widening multiplication. */
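/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
   (sign_extend:SI (reg:HI B)))) is canonicalized to
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))),
   since the 16x16->32 bit product cannot overflow SImode. */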
1285 if (GET_CODE (op) == MULT)
1287 rtx lhs = XEXP (op, 0);
1288 rtx rhs = XEXP (op, 1);
1289 enum rtx_code lcode = GET_CODE (lhs);
1290 enum rtx_code rcode = GET_CODE (rhs);
1292 /* Widening multiplies usually extend both operands, but sometimes
1293 they use a shift to extract a portion of a register. */
1294 if ((lcode == SIGN_EXTEND
1295 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1296 && (rcode == SIGN_EXTEND
1297 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1299 enum machine_mode lmode = GET_MODE (lhs);
1300 enum machine_mode rmode = GET_MODE (rhs);
1301 int bits;
1303 if (lcode == ASHIFTRT)
1304 /* Number of bits not shifted off the end. */
1305 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1306 else /* lcode == SIGN_EXTEND */
1307 /* Size of inner mode. */
1308 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1310 if (rcode == ASHIFTRT)
1311 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1312 else /* rcode == SIGN_EXTEND */
1313 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1315 /* We can only widen multiplies if the result is mathematically
1316 equivalent. I.e. if overflow was impossible. */
1317 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1318 return simplify_gen_binary
1319 (MULT, mode,
1320 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1321 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1325 /* Check for a sign extension of a subreg of a promoted
1326 variable, where the promotion is sign-extended, and the
1327 target mode is the same as the variable's promotion. */
1328 if (GET_CODE (op) == SUBREG
1329 && SUBREG_PROMOTED_VAR_P (op)
1330 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1331 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1333 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1334 if (temp)
1335 return temp;
1338 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1339 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1340 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1342 gcc_assert (GET_MODE_BITSIZE (mode)
1343 > GET_MODE_BITSIZE (GET_MODE (op)));
1344 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1345 GET_MODE (XEXP (op, 0)));
1348 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1349 is (sign_extend:M (subreg:O <X>)) if there is mode with
1350 GET_MODE_BITSIZE (N) - I bits.
1351 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1352 is similarly (zero_extend:M (subreg:O <X>)). */
1353 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1354 && GET_CODE (XEXP (op, 0)) == ASHIFT
1355 && CONST_INT_P (XEXP (op, 1))
1356 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1357 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1359 enum machine_mode tmode
1360 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1361 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1362 gcc_assert (GET_MODE_BITSIZE (mode)
1363 > GET_MODE_BITSIZE (GET_MODE (op)));
1364 if (tmode != BLKmode)
1366 rtx inner =
1367 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1368 if (inner)
1369 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1370 ? SIGN_EXTEND : ZERO_EXTEND,
1371 mode, inner, tmode);
1375 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1376 /* As we do not know which address space the pointer is referring to,
1377 we can do this only if the target does not support different pointer
1378 or address modes depending on the address space. */
1379 if (target_default_pointer_address_modes_p ()
1380 && ! POINTERS_EXTEND_UNSIGNED
1381 && mode == Pmode && GET_MODE (op) == ptr_mode
1382 && (CONSTANT_P (op)
1383 || (GET_CODE (op) == SUBREG
1384 && REG_P (SUBREG_REG (op))
1385 && REG_POINTER (SUBREG_REG (op))
1386 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1387 return convert_memory_address (Pmode, op);
1388 #endif
1389 break;
1391 case ZERO_EXTEND:
1392 /* Check for a zero extension of a subreg of a promoted
1393 variable, where the promotion is zero-extended, and the
1394 target mode is the same as the variable's promotion. */
1395 if (GET_CODE (op) == SUBREG
1396 && SUBREG_PROMOTED_VAR_P (op)
1397 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1398 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1400 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1401 if (temp)
1402 return temp;
1405 /* Extending a widening multiplication should be canonicalized to
1406 a wider widening multiplication. */
1407 if (GET_CODE (op) == MULT)
1409 rtx lhs = XEXP (op, 0);
1410 rtx rhs = XEXP (op, 1);
1411 enum rtx_code lcode = GET_CODE (lhs);
1412 enum rtx_code rcode = GET_CODE (rhs);
1414 /* Widening multiplies usually extend both operands, but sometimes
1415 they use a shift to extract a portion of a register. */
1416 if ((lcode == ZERO_EXTEND
1417 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1418 && (rcode == ZERO_EXTEND
1419 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1421 enum machine_mode lmode = GET_MODE (lhs);
1422 enum machine_mode rmode = GET_MODE (rhs);
1423 int bits;
1425 if (lcode == LSHIFTRT)
1426 /* Number of bits not shifted off the end. */
1427 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1428 else /* lcode == ZERO_EXTEND */
1429 /* Size of inner mode. */
1430 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1432 if (rcode == LSHIFTRT)
1433 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1434 else /* rcode == ZERO_EXTEND */
1435 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1437 /* We can only widen multiplies if the result is mathematically
1438 equivalent. I.e. if overflow was impossible. */
1439 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1440 return simplify_gen_binary
1441 (MULT, mode,
1442 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1443 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1447 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1448 if (GET_CODE (op) == ZERO_EXTEND)
1449 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1450 GET_MODE (XEXP (op, 0)));
1452 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1453 is (zero_extend:M (subreg:O <X>)) if there is mode with
1454 GET_MODE_BITSIZE (N) - I bits. */
1455 if (GET_CODE (op) == LSHIFTRT
1456 && GET_CODE (XEXP (op, 0)) == ASHIFT
1457 && CONST_INT_P (XEXP (op, 1))
1458 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1459 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1461 enum machine_mode tmode
1462 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1463 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1464 if (tmode != BLKmode)
1466 rtx inner =
1467 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1468 if (inner)
1469 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1473 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1474 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1475 of mode N. E.g.
1476 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1477 (and:SI (reg:SI) (const_int 63)). */
1478 if (GET_CODE (op) == SUBREG
1479 && GET_MODE_PRECISION (GET_MODE (op))
1480 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1481 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1482 <= HOST_BITS_PER_WIDE_INT
1483 && GET_MODE_PRECISION (mode)
1484 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1485 && subreg_lowpart_p (op)
1486 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1487 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1489 if (GET_MODE_PRECISION (mode)
1490 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1491 return SUBREG_REG (op);
1492 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1493 GET_MODE (SUBREG_REG (op)));
1496 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1497 /* As we do not know which address space the pointer is referring to,
1498 we can do this only if the target does not support different pointer
1499 or address modes depending on the address space. */
1500 if (target_default_pointer_address_modes_p ()
1501 && POINTERS_EXTEND_UNSIGNED > 0
1502 && mode == Pmode && GET_MODE (op) == ptr_mode
1503 && (CONSTANT_P (op)
1504 || (GET_CODE (op) == SUBREG
1505 && REG_P (SUBREG_REG (op))
1506 && REG_POINTER (SUBREG_REG (op))
1507 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1508 return convert_memory_address (Pmode, op);
1509 #endif
1510 break;
1512 default:
1513 break;
1516 return 0;
1519 /* Try to compute the value of a unary operation CODE whose output mode is to
1520 be MODE with input operand OP whose mode was originally OP_MODE.
1521 Return zero if the value cannot be computed. */
1523 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op, enum machine_mode op_mode)
1526 unsigned int width = GET_MODE_PRECISION (mode);
1527 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1529 if (code == VEC_DUPLICATE)
1531 gcc_assert (VECTOR_MODE_P (mode));
1532 if (GET_MODE (op) != VOIDmode)
1534 if (!VECTOR_MODE_P (GET_MODE (op)))
1535 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1536 else
1537 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1538 (GET_MODE (op)));
1540 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1541 || GET_CODE (op) == CONST_VECTOR)
1543 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1544 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1545 rtvec v = rtvec_alloc (n_elts);
1546 unsigned int i;
1548 if (GET_CODE (op) != CONST_VECTOR)
1549 for (i = 0; i < n_elts; i++)
1550 RTVEC_ELT (v, i) = op;
1551 else
1553 enum machine_mode inmode = GET_MODE (op);
1554 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1555 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1557 gcc_assert (in_n_elts < n_elts);
1558 gcc_assert ((n_elts % in_n_elts) == 0);
1559 for (i = 0; i < n_elts; i++)
1560 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1562 return gen_rtx_CONST_VECTOR (mode, v);
1566 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1568 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1569 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1570 enum machine_mode opmode = GET_MODE (op);
1571 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1572 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1573 rtvec v = rtvec_alloc (n_elts);
1574 unsigned int i;
1576 gcc_assert (op_n_elts == n_elts);
1577 for (i = 0; i < n_elts; i++)
1579 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1580 CONST_VECTOR_ELT (op, i),
1581 GET_MODE_INNER (opmode));
1582 if (!x)
1583 return 0;
1584 RTVEC_ELT (v, i) = x;
1586 return gen_rtx_CONST_VECTOR (mode, v);
1589 /* The order of these tests is critical so that, for example, we don't
1590 check the wrong mode (input vs. output) for a conversion operation,
1591 such as FIX. At some point, this should be simplified. */
1593 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1595 HOST_WIDE_INT hv, lv;
1596 REAL_VALUE_TYPE d;
1598 if (CONST_INT_P (op))
1599 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1600 else
1601 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1603 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1604 d = real_value_truncate (mode, d);
1605 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1607 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1609 HOST_WIDE_INT hv, lv;
1610 REAL_VALUE_TYPE d;
1612 if (CONST_INT_P (op))
1613 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1614 else
1615 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1617 if (op_mode == VOIDmode
1618 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1619 /* We should never get a negative number. */
1620 gcc_assert (hv >= 0);
1621 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1622 hv = 0, lv &= GET_MODE_MASK (op_mode);
1624 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1625 d = real_value_truncate (mode, d);
1626 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1629 if (CONST_INT_P (op)
1630 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1632 HOST_WIDE_INT arg0 = INTVAL (op);
1633 HOST_WIDE_INT val;
1635 switch (code)
1637 case NOT:
1638 val = ~ arg0;
1639 break;
1641 case NEG:
1642 val = - arg0;
1643 break;
1645 case ABS:
1646 val = (arg0 >= 0 ? arg0 : - arg0);
1647 break;
1649 case FFS:
1650 arg0 &= GET_MODE_MASK (mode);
1651 val = ffs_hwi (arg0);
1652 break;
1654 case CLZ:
1655 arg0 &= GET_MODE_MASK (mode);
1656 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1658 else
1659 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1660 break;
1662 case CLRSB:
1663 arg0 &= GET_MODE_MASK (mode);
1664 if (arg0 == 0)
1665 val = GET_MODE_PRECISION (mode) - 1;
1666 else if (arg0 >= 0)
1667 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1668 else if (arg0 < 0)
1669 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1670 break;
1672 case CTZ:
1673 arg0 &= GET_MODE_MASK (mode);
1674 if (arg0 == 0)
1676 /* Even if the value at zero is undefined, we have to come
1677 up with some replacement. Seems good enough. */
1678 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1679 val = GET_MODE_PRECISION (mode);
1681 else
1682 val = ctz_hwi (arg0);
1683 break;
1685 case POPCOUNT:
1686 arg0 &= GET_MODE_MASK (mode);
1687 val = 0;
1688 while (arg0)
1689 val++, arg0 &= arg0 - 1;
1690 break;
1692 case PARITY:
1693 arg0 &= GET_MODE_MASK (mode);
1694 val = 0;
1695 while (arg0)
1696 val++, arg0 &= arg0 - 1;
1697 val &= 1;
1698 break;
1700 case BSWAP:
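/* E.g. for 32-bit SImode, arg0 = 0x12345678 produces
   val = 0x78563412: each iteration moves one byte from bit
   position S to its mirrored position D = width - S - 8. */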
1702 unsigned int s;
1704 val = 0;
1705 for (s = 0; s < width; s += 8)
1707 unsigned int d = width - s - 8;
1708 unsigned HOST_WIDE_INT byte;
1709 byte = (arg0 >> s) & 0xff;
1710 val |= byte << d;
1713 break;
1715 case TRUNCATE:
1716 val = arg0;
1717 break;
1719 case ZERO_EXTEND:
1720 /* When zero-extending a CONST_INT, we need to know its
1721 original mode. */
1722 gcc_assert (op_mode != VOIDmode);
1723 if (op_width == HOST_BITS_PER_WIDE_INT)
1725 /* If we were really extending the mode,
1726 we would have to distinguish between zero-extension
1727 and sign-extension. */
1728 gcc_assert (width == op_width);
1729 val = arg0;
1731 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1732 val = arg0 & GET_MODE_MASK (op_mode);
1733 else
1734 return 0;
1735 break;
1737 case SIGN_EXTEND:
1738 if (op_mode == VOIDmode)
1739 op_mode = mode;
1740 op_width = GET_MODE_PRECISION (op_mode);
1741 if (op_width == HOST_BITS_PER_WIDE_INT)
1743 /* If we were really extending the mode,
1744 we would have to distinguish between zero-extension
1745 and sign-extension. */
1746 gcc_assert (width == op_width);
1747 val = arg0;
1749 else if (op_width < HOST_BITS_PER_WIDE_INT)
1751 val = arg0 & GET_MODE_MASK (op_mode);
1752 if (val_signbit_known_set_p (op_mode, val))
1753 val |= ~GET_MODE_MASK (op_mode);
1755 else
1756 return 0;
1757 break;
1759 case SQRT:
1760 case FLOAT_EXTEND:
1761 case FLOAT_TRUNCATE:
1762 case SS_TRUNCATE:
1763 case US_TRUNCATE:
1764 case SS_NEG:
1765 case US_NEG:
1766 case SS_ABS:
1767 return 0;
1769 default:
1770 gcc_unreachable ();
1773 return gen_int_mode (val, mode);
1776 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1777 for a DImode operation on a CONST_INT. */
1778 else if (width <= HOST_BITS_PER_DOUBLE_INT
1779 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1781 double_int first, value;
1783 if (CONST_DOUBLE_AS_INT_P (op))
1784 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1785 CONST_DOUBLE_LOW (op));
1786 else
1787 first = double_int::from_shwi (INTVAL (op));
1789 switch (code)
1791 case NOT:
1792 value = ~first;
1793 break;
1795 case NEG:
1796 value = -first;
1797 break;
1799 case ABS:
1800 if (first.is_negative ())
1801 value = -first;
1802 else
1803 value = first;
1804 break;
1806 case FFS:
1807 value.high = 0;
1808 if (first.low != 0)
1809 value.low = ffs_hwi (first.low);
1810 else if (first.high != 0)
1811 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1812 else
1813 value.low = 0;
1814 break;
1816 case CLZ:
1817 value.high = 0;
1818 if (first.high != 0)
1819 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1820 - HOST_BITS_PER_WIDE_INT;
1821 else if (first.low != 0)
1822 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1823 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1824 value.low = GET_MODE_PRECISION (mode);
1825 break;
1827 case CTZ:
1828 value.high = 0;
1829 if (first.low != 0)
1830 value.low = ctz_hwi (first.low);
1831 else if (first.high != 0)
1832 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1833 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1834 value.low = GET_MODE_PRECISION (mode);
1835 break;
1837 case POPCOUNT:
1838 value = double_int_zero;
1839 while (first.low)
1841 value.low++;
1842 first.low &= first.low - 1;
1844 while (first.high)
1846 value.low++;
1847 first.high &= first.high - 1;
1849 break;
1851 case PARITY:
1852 value = double_int_zero;
1853 while (first.low)
1855 value.low++;
1856 first.low &= first.low - 1;
1858 while (first.high)
1860 value.low++;
1861 first.high &= first.high - 1;
1863 value.low &= 1;
1864 break;
1866 case BSWAP:
1868 unsigned int s;
1870 value = double_int_zero;
1871 for (s = 0; s < width; s += 8)
1873 unsigned int d = width - s - 8;
1874 unsigned HOST_WIDE_INT byte;
1876 if (s < HOST_BITS_PER_WIDE_INT)
1877 byte = (first.low >> s) & 0xff;
1878 else
1879 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1881 if (d < HOST_BITS_PER_WIDE_INT)
1882 value.low |= byte << d;
1883 else
1884 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1887 break;
1889 case TRUNCATE:
1890 /* This is just a change-of-mode, so do nothing. */
1891 value = first;
1892 break;
1894 case ZERO_EXTEND:
1895 gcc_assert (op_mode != VOIDmode);
1897 if (op_width > HOST_BITS_PER_WIDE_INT)
1898 return 0;
1900 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1901 break;
1903 case SIGN_EXTEND:
1904 if (op_mode == VOIDmode
1905 || op_width > HOST_BITS_PER_WIDE_INT)
1906 return 0;
1907 else
1909 value.low = first.low & GET_MODE_MASK (op_mode);
1910 if (val_signbit_known_set_p (op_mode, value.low))
1911 value.low |= ~GET_MODE_MASK (op_mode);
1913 value.high = HWI_SIGN_EXTEND (value.low);
1915 break;
1917 case SQRT:
1918 return 0;
1920 default:
1921 return 0;
1924 return immed_double_int_const (value, mode);
1927 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1928 && SCALAR_FLOAT_MODE_P (mode)
1929 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1931 REAL_VALUE_TYPE d, t;
1932 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1934 switch (code)
1936 case SQRT:
1937 if (HONOR_SNANS (mode) && real_isnan (&d))
1938 return 0;
1939 real_sqrt (&t, mode, &d);
1940 d = t;
1941 break;
1942 case ABS:
1943 d = real_value_abs (&d);
1944 break;
1945 case NEG:
1946 d = real_value_negate (&d);
1947 break;
1948 case FLOAT_TRUNCATE:
1949 d = real_value_truncate (mode, d);
1950 break;
1951 case FLOAT_EXTEND:
1952 /* All this does is change the mode, unless the mode class changes,
1953 in which case a real conversion is performed. */
1954 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1955 real_convert (&d, mode, &d);
1956 break;
1957 case FIX:
1958 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1959 break;
1960 case NOT:
1962 long tmp[4];
1963 int i;
1965 real_to_target (tmp, &d, GET_MODE (op));
1966 for (i = 0; i < 4; i++)
1967 tmp[i] = ~tmp[i];
1968 real_from_target (&d, tmp, mode);
1969 break;
1971 default:
1972 gcc_unreachable ();
1974 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1977 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1978 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1979 && GET_MODE_CLASS (mode) == MODE_INT
1980 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1982 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1983 operators are intentionally left unspecified (to ease implementation
1984 by target backends), for consistency, this routine implements the
1985 same semantics for constant folding as used by the middle-end. */
1987 /* This was formerly used only for non-IEEE float.
1988 eggert@twinsun.com says it is safe for IEEE also. */
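 /* Worked example (illustration): for FIX to SImode (width 32) the bounds
 computed below are th = 0, tl = 0x7fffffff, so folding FIX of 3e9 clamps
 to 2147483647.  */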
1989 HOST_WIDE_INT xh, xl, th, tl;
1990 REAL_VALUE_TYPE x, t;
1991 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1992 switch (code)
1994 case FIX:
1995 if (REAL_VALUE_ISNAN (x))
1996 return const0_rtx;
1998 /* Test against the signed upper bound. */
1999 if (width > HOST_BITS_PER_WIDE_INT)
2001 th = ((unsigned HOST_WIDE_INT) 1
2002 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2003 tl = -1;
2005 else
2007 th = 0;
2008 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2010 real_from_integer (&t, VOIDmode, tl, th, 0);
2011 if (REAL_VALUES_LESS (t, x))
2013 xh = th;
2014 xl = tl;
2015 break;
2018 /* Test against the signed lower bound. */
2019 if (width > HOST_BITS_PER_WIDE_INT)
2021 th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
2022 tl = 0;
2024 else
2026 th = -1;
2027 tl = HOST_WIDE_INT_M1U << (width - 1);
2029 real_from_integer (&t, VOIDmode, tl, th, 0);
2030 if (REAL_VALUES_LESS (x, t))
2032 xh = th;
2033 xl = tl;
2034 break;
2036 REAL_VALUE_TO_INT (&xl, &xh, x);
2037 break;
2039 case UNSIGNED_FIX:
2040 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2041 return const0_rtx;
2043 /* Test against the unsigned upper bound. */
2044 if (width == HOST_BITS_PER_DOUBLE_INT)
2046 th = -1;
2047 tl = -1;
2049 else if (width >= HOST_BITS_PER_WIDE_INT)
2051 th = ((unsigned HOST_WIDE_INT) 1
2052 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2053 tl = -1;
2055 else
2057 th = 0;
2058 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2060 real_from_integer (&t, VOIDmode, tl, th, 1);
2061 if (REAL_VALUES_LESS (t, x))
2063 xh = th;
2064 xl = tl;
2065 break;
2068 REAL_VALUE_TO_INT (&xl, &xh, x);
2069 break;
2071 default:
2072 gcc_unreachable ();
2074 return immed_double_const (xl, xh, mode);
2077 return NULL_RTX;
2080 /* Subroutine of simplify_binary_operation to simplify a binary operation
2081 CODE that can commute with byte swapping, with result mode MODE and
2082 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2083 Return zero if no simplification or canonicalization is possible. */
2085 static rtx
2086 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2087 rtx op0, rtx op1)
2089 rtx tem;
2091 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped. */
2092 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2094 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2095 simplify_gen_unary (BSWAP, mode, op1, mode));
2096 return simplify_gen_unary (BSWAP, mode, tem, mode);
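 /* Worked example (illustration): in SImode, (and (bswap x) (const_int 0xff))
 becomes (bswap (and x (const_int 0xff000000))), since BSWAP commutes with
 bitwise operations applied byte-wise.  */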
2099 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2100 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2102 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2103 return simplify_gen_unary (BSWAP, mode, tem, mode);
2106 return NULL_RTX;
2109 /* Subroutine of simplify_binary_operation to simplify a commutative,
2110 associative binary operation CODE with result mode MODE, operating
2111 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2112 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2113 canonicalization is possible. */
2115 static rtx
2116 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2117 rtx op0, rtx op1)
2119 rtx tem;
2121 /* Linearize the operator to the left. */
2122 if (GET_CODE (op1) == code)
2124 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
2125 if (GET_CODE (op0) == code)
2127 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2128 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2131 /* "a op (b op c)" becomes "(b op c) op a". */
2132 if (! swap_commutative_operands_p (op1, op0))
2133 return simplify_gen_binary (code, mode, op1, op0);
2135 tem = op0;
2136 op0 = op1;
2137 op1 = tem;
2140 if (GET_CODE (op0) == code)
2142 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2143 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2145 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2146 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2149 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2150 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2151 if (tem != 0)
2152 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2154 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2155 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2156 if (tem != 0)
2157 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2160 return 0;
2164 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2165 and OP1. Return 0 if no simplification is possible.
2167 Don't use this for relational operations such as EQ or LT.
2168 Use simplify_relational_operation instead. */
2170 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2171 rtx op0, rtx op1)
2173 rtx trueop0, trueop1;
2174 rtx tem;
2176 /* Relational operations don't work here. We must know the mode
2177 of the operands in order to do the comparison correctly.
2178 Assuming a full word can give incorrect results.
2179 Consider comparing 128 with -128 in QImode. */
2180 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2181 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2183 /* Make sure the constant is second. */
2184 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2185 && swap_commutative_operands_p (op0, op1))
2187 tem = op0, op0 = op1, op1 = tem;
2190 trueop0 = avoid_constant_pool_reference (op0);
2191 trueop1 = avoid_constant_pool_reference (op1);
2193 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2194 if (tem)
2195 return tem;
2196 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2199 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2200 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2201 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2202 actual constants. */
2204 static rtx
2205 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2206 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2208 rtx tem, reversed, opleft, opright;
2209 HOST_WIDE_INT val;
2210 unsigned int width = GET_MODE_PRECISION (mode);
2212 /* Even if we can't compute a constant result,
2213 there are some cases worth simplifying. */
2215 switch (code)
2217 case PLUS:
2218 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2219 when x is NaN, infinite, or finite and nonzero. They aren't
2220 when x is -0 and the rounding mode is not towards -infinity,
2221 since (-0) + 0 is then 0. */
2222 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2223 return op0;
2225 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2226 transformations are safe even for IEEE. */
2227 if (GET_CODE (op0) == NEG)
2228 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2229 else if (GET_CODE (op1) == NEG)
2230 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2232 /* (~a) + 1 -> -a */
2233 if (INTEGRAL_MODE_P (mode)
2234 && GET_CODE (op0) == NOT
2235 && trueop1 == const1_rtx)
2236 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2238 /* Handle both-operands-constant cases. We can only add
2239 CONST_INTs to constants since the sum of relocatable symbols
2240 can't be handled by most assemblers. Don't add CONST_INT
2241 to CONST_INT since overflow won't be computed properly if wider
2242 than HOST_BITS_PER_WIDE_INT. */
2244 if ((GET_CODE (op0) == CONST
2245 || GET_CODE (op0) == SYMBOL_REF
2246 || GET_CODE (op0) == LABEL_REF)
2247 && CONST_INT_P (op1))
2248 return plus_constant (mode, op0, INTVAL (op1));
2249 else if ((GET_CODE (op1) == CONST
2250 || GET_CODE (op1) == SYMBOL_REF
2251 || GET_CODE (op1) == LABEL_REF)
2252 && CONST_INT_P (op0))
2253 return plus_constant (mode, op1, INTVAL (op0));
2255 /* See if this is something like X * C - X or vice versa or
2256 if the multiplication is written as a shift. If so, we can
2257 distribute and make a new multiply, shift, or maybe just
2258 have X (if C is 2 in the example above). But don't make
2259 something more expensive than we had before. */
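 /* Worked examples (illustration): (plus (mult x (const_int 3)) x) becomes
 (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) becomes
 (mult x (const_int 5)), provided the new form is no more expensive.  */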
2261 if (SCALAR_INT_MODE_P (mode))
2263 double_int coeff0, coeff1;
2264 rtx lhs = op0, rhs = op1;
2266 coeff0 = double_int_one;
2267 coeff1 = double_int_one;
2269 if (GET_CODE (lhs) == NEG)
2271 coeff0 = double_int_minus_one;
2272 lhs = XEXP (lhs, 0);
2274 else if (GET_CODE (lhs) == MULT
2275 && CONST_INT_P (XEXP (lhs, 1)))
2277 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2278 lhs = XEXP (lhs, 0);
2280 else if (GET_CODE (lhs) == ASHIFT
2281 && CONST_INT_P (XEXP (lhs, 1))
2282 && INTVAL (XEXP (lhs, 1)) >= 0
2283 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2285 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2286 lhs = XEXP (lhs, 0);
2289 if (GET_CODE (rhs) == NEG)
2291 coeff1 = double_int_minus_one;
2292 rhs = XEXP (rhs, 0);
2294 else if (GET_CODE (rhs) == MULT
2295 && CONST_INT_P (XEXP (rhs, 1)))
2297 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2298 rhs = XEXP (rhs, 0);
2300 else if (GET_CODE (rhs) == ASHIFT
2301 && CONST_INT_P (XEXP (rhs, 1))
2302 && INTVAL (XEXP (rhs, 1)) >= 0
2303 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2305 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2306 rhs = XEXP (rhs, 0);
2309 if (rtx_equal_p (lhs, rhs))
2311 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2312 rtx coeff;
2313 double_int val;
2314 bool speed = optimize_function_for_speed_p (cfun);
2316 val = coeff0 + coeff1;
2317 coeff = immed_double_int_const (val, mode);
2319 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2320 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2321 ? tem : 0;
2325 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2326 if (CONST_SCALAR_INT_P (op1)
2327 && GET_CODE (op0) == XOR
2328 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2329 && mode_signbit_p (mode, op1))
2330 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2331 simplify_gen_binary (XOR, mode, op1,
2332 XEXP (op0, 1)));
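 /* This is valid because adding the sign bit cannot change any other bit
 (the only possible carry propagates out of the word), so the PLUS acts as
 an XOR of the sign bit; e.g. in QImode, (plus (xor x (const_int 1))
 (const_int 0x80)) is (xor x (const_int 0x81)).  */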
2334 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2335 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2336 && GET_CODE (op0) == MULT
2337 && GET_CODE (XEXP (op0, 0)) == NEG)
2339 rtx in1, in2;
2341 in1 = XEXP (XEXP (op0, 0), 0);
2342 in2 = XEXP (op0, 1);
2343 return simplify_gen_binary (MINUS, mode, op1,
2344 simplify_gen_binary (MULT, mode,
2345 in1, in2));
2348 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2349 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2350 is 1. */
2351 if (COMPARISON_P (op0)
2352 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2353 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2354 && (reversed = reversed_comparison (op0, mode)))
2355 return
2356 simplify_gen_unary (NEG, mode, reversed, mode);
2358 /* If one of the operands is a PLUS or a MINUS, see if we can
2359 simplify this by the associative law.
2360 Don't use the associative law for floating point.
2361 The inaccuracy makes it nonassociative,
2362 and subtle programs can break if operations are associated. */
2364 if (INTEGRAL_MODE_P (mode)
2365 && (plus_minus_operand_p (op0)
2366 || plus_minus_operand_p (op1))
2367 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2368 return tem;
2370 /* Reassociate floating point addition only when the user
2371 specifies associative math operations. */
2372 if (FLOAT_MODE_P (mode)
2373 && flag_associative_math)
2375 tem = simplify_associative_operation (code, mode, op0, op1);
2376 if (tem)
2377 return tem;
2379 break;
2381 case COMPARE:
2382 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2383 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2384 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2385 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2387 rtx xop00 = XEXP (op0, 0);
2388 rtx xop10 = XEXP (op1, 0);
2390 #ifdef HAVE_cc0
2391 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2392 #else
2393 if (REG_P (xop00) && REG_P (xop10)
2394 && GET_MODE (xop00) == GET_MODE (xop10)
2395 && REGNO (xop00) == REGNO (xop10)
2396 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2397 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2398 #endif
2399 return xop00;
2401 break;
2403 case MINUS:
2404 /* We can't assume x-x is 0 even with non-IEEE floating point,
2405 but since it is zero except in very strange circumstances, we
2406 will treat it as zero with -ffinite-math-only. */
2407 if (rtx_equal_p (trueop0, trueop1)
2408 && ! side_effects_p (op0)
2409 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2410 return CONST0_RTX (mode);
2412 /* Change subtraction from zero into negation. (0 - x) is the
2413 same as -x when x is NaN, infinite, or finite and nonzero.
2414 But if the mode has signed zeros, and does not round towards
2415 -infinity, then 0 - 0 is 0, not -0. */
2416 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2417 return simplify_gen_unary (NEG, mode, op1, mode);
2419 /* (-1 - a) is ~a. */
2420 if (trueop0 == constm1_rtx)
2421 return simplify_gen_unary (NOT, mode, op1, mode);
2423 /* Subtracting 0 has no effect unless the mode has signed zeros
2424 and supports rounding towards -infinity. In such a case,
2425 0 - 0 is -0. */
2426 if (!(HONOR_SIGNED_ZEROS (mode)
2427 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2428 && trueop1 == CONST0_RTX (mode))
2429 return op0;
2431 /* See if this is something like X * C - X or vice versa or
2432 if the multiplication is written as a shift. If so, we can
2433 distribute and make a new multiply, shift, or maybe just
2434 have X (if C is 2 in the example above). But don't make
2435 something more expensive than we had before. */
2437 if (SCALAR_INT_MODE_P (mode))
2439 double_int coeff0, negcoeff1;
2440 rtx lhs = op0, rhs = op1;
2442 coeff0 = double_int_one;
2443 negcoeff1 = double_int_minus_one;
2445 if (GET_CODE (lhs) == NEG)
2447 coeff0 = double_int_minus_one;
2448 lhs = XEXP (lhs, 0);
2450 else if (GET_CODE (lhs) == MULT
2451 && CONST_INT_P (XEXP (lhs, 1)))
2453 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2454 lhs = XEXP (lhs, 0);
2456 else if (GET_CODE (lhs) == ASHIFT
2457 && CONST_INT_P (XEXP (lhs, 1))
2458 && INTVAL (XEXP (lhs, 1)) >= 0
2459 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2461 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2462 lhs = XEXP (lhs, 0);
2465 if (GET_CODE (rhs) == NEG)
2467 negcoeff1 = double_int_one;
2468 rhs = XEXP (rhs, 0);
2470 else if (GET_CODE (rhs) == MULT
2471 && CONST_INT_P (XEXP (rhs, 1)))
2473 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2474 rhs = XEXP (rhs, 0);
2476 else if (GET_CODE (rhs) == ASHIFT
2477 && CONST_INT_P (XEXP (rhs, 1))
2478 && INTVAL (XEXP (rhs, 1)) >= 0
2479 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2481 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2482 negcoeff1 = -negcoeff1;
2483 rhs = XEXP (rhs, 0);
2486 if (rtx_equal_p (lhs, rhs))
2488 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2489 rtx coeff;
2490 double_int val;
2491 bool speed = optimize_function_for_speed_p (cfun);
2493 val = coeff0 + negcoeff1;
2494 coeff = immed_double_int_const (val, mode);
2496 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2497 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2498 ? tem : 0;
2502 /* (a - (-b)) -> (a + b). True even for IEEE. */
2503 if (GET_CODE (op1) == NEG)
2504 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2506 /* (-x - c) may be simplified as (-c - x). */
2507 if (GET_CODE (op0) == NEG
2508 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2510 tem = simplify_unary_operation (NEG, mode, op1, mode);
2511 if (tem)
2512 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2515 /* Don't let a relocatable value get a negative coeff. */
2516 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2517 return simplify_gen_binary (PLUS, mode,
2518 op0,
2519 neg_const_int (mode, op1));
2521 /* (x - (x & y)) -> (x & ~y) */
2522 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2524 if (rtx_equal_p (op0, XEXP (op1, 0)))
2526 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2527 GET_MODE (XEXP (op1, 1)));
2528 return simplify_gen_binary (AND, mode, op0, tem);
2530 if (rtx_equal_p (op0, XEXP (op1, 1)))
2532 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2533 GET_MODE (XEXP (op1, 0)));
2534 return simplify_gen_binary (AND, mode, op0, tem);
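 /* The subtraction above never borrows because every bit of (x & y) is also
 set in x; e.g. x = 0b1100, y = 0b1010: x - (x & y) = 0b1100 - 0b1000
 = 0b0100 = x & ~y.  */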
2538 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2539 by reversing the comparison code if valid. */
2540 if (STORE_FLAG_VALUE == 1
2541 && trueop0 == const1_rtx
2542 && COMPARISON_P (op1)
2543 && (reversed = reversed_comparison (op1, mode)))
2544 return reversed;
2546 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2547 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2548 && GET_CODE (op1) == MULT
2549 && GET_CODE (XEXP (op1, 0)) == NEG)
2551 rtx in1, in2;
2553 in1 = XEXP (XEXP (op1, 0), 0);
2554 in2 = XEXP (op1, 1);
2555 return simplify_gen_binary (PLUS, mode,
2556 simplify_gen_binary (MULT, mode,
2557 in1, in2),
2558 op0);
2561 /* Canonicalize (minus (neg A) (mult B C)) to
2562 (minus (mult (neg B) C) A). */
2563 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2564 && GET_CODE (op1) == MULT
2565 && GET_CODE (op0) == NEG)
2567 rtx in1, in2;
2569 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2570 in2 = XEXP (op1, 1);
2571 return simplify_gen_binary (MINUS, mode,
2572 simplify_gen_binary (MULT, mode,
2573 in1, in2),
2574 XEXP (op0, 0));
2577 /* If one of the operands is a PLUS or a MINUS, see if we can
2578 simplify this by the associative law. This will, for example,
2579 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2580 Don't use the associative law for floating point.
2581 The inaccuracy makes it nonassociative,
2582 and subtle programs can break if operations are associated. */
2584 if (INTEGRAL_MODE_P (mode)
2585 && (plus_minus_operand_p (op0)
2586 || plus_minus_operand_p (op1))
2587 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2588 return tem;
2589 break;
2591 case MULT:
2592 if (trueop1 == constm1_rtx)
2593 return simplify_gen_unary (NEG, mode, op0, mode);
2595 if (GET_CODE (op0) == NEG)
2597 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2598 /* If op1 is a MULT as well and simplify_unary_operation
2599 just moved the NEG to the second operand, the simplify_gen_binary
2600 call below could, via simplify_associative_operation, move
2601 the NEG around again and recurse endlessly. */
2602 if (temp
2603 && GET_CODE (op1) == MULT
2604 && GET_CODE (temp) == MULT
2605 && XEXP (op1, 0) == XEXP (temp, 0)
2606 && GET_CODE (XEXP (temp, 1)) == NEG
2607 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2608 temp = NULL_RTX;
2609 if (temp)
2610 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2612 if (GET_CODE (op1) == NEG)
2614 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2615 /* If op0 is a MULT as well and simplify_unary_operation
2616 just moved the NEG to the second operand, the simplify_gen_binary
2617 call below could, via simplify_associative_operation, move
2618 the NEG around again and recurse endlessly. */
2619 if (temp
2620 && GET_CODE (op0) == MULT
2621 && GET_CODE (temp) == MULT
2622 && XEXP (op0, 0) == XEXP (temp, 0)
2623 && GET_CODE (XEXP (temp, 1)) == NEG
2624 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2625 temp = NULL_RTX;
2626 if (temp)
2627 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2630 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2631 x is NaN, since x * 0 is then also NaN. Nor is it valid
2632 when the mode has signed zeros, since multiplying a negative
2633 number by 0 will give -0, not 0. */
2634 if (!HONOR_NANS (mode)
2635 && !HONOR_SIGNED_ZEROS (mode)
2636 && trueop1 == CONST0_RTX (mode)
2637 && ! side_effects_p (op0))
2638 return op1;
2640 /* In IEEE floating point, x*1 is not equivalent to x for
2641 signalling NaNs. */
2642 if (!HONOR_SNANS (mode)
2643 && trueop1 == CONST1_RTX (mode))
2644 return op0;
2646 /* Convert multiply by constant power of two into shift unless
2647 we are still generating RTL. This test is a kludge. */
2648 if (CONST_INT_P (trueop1)
2649 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2650 /* If the mode is larger than the host word size, and the
2651 uppermost bit is set, then this isn't a power of two due
2652 to implicit sign extension. */
2653 && (width <= HOST_BITS_PER_WIDE_INT
2654 || val != HOST_BITS_PER_WIDE_INT - 1))
2655 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
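 /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */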
2657 /* Likewise for multipliers wider than a word. */
2658 if (CONST_DOUBLE_AS_INT_P (trueop1)
2659 && GET_MODE (op0) == mode
2660 && CONST_DOUBLE_LOW (trueop1) == 0
2661 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2662 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2663 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2664 return simplify_gen_binary (ASHIFT, mode, op0,
2665 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2667 /* x*2 is x+x and x*(-1) is -x */
2668 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2669 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2670 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2671 && GET_MODE (op0) == mode)
2673 REAL_VALUE_TYPE d;
2674 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2676 if (REAL_VALUES_EQUAL (d, dconst2))
2677 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2679 if (!HONOR_SNANS (mode)
2680 && REAL_VALUES_EQUAL (d, dconstm1))
2681 return simplify_gen_unary (NEG, mode, op0, mode);
2684 /* Optimize -x * -x as x * x. */
2685 if (FLOAT_MODE_P (mode)
2686 && GET_CODE (op0) == NEG
2687 && GET_CODE (op1) == NEG
2688 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2689 && !side_effects_p (XEXP (op0, 0)))
2690 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2692 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2693 if (SCALAR_FLOAT_MODE_P (mode)
2694 && GET_CODE (op0) == ABS
2695 && GET_CODE (op1) == ABS
2696 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2697 && !side_effects_p (XEXP (op0, 0)))
2698 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2700 /* Reassociate multiplication, but for floating point MULTs
2701 only when the user specifies unsafe math optimizations. */
2702 if (! FLOAT_MODE_P (mode)
2703 || flag_unsafe_math_optimizations)
2705 tem = simplify_associative_operation (code, mode, op0, op1);
2706 if (tem)
2707 return tem;
2709 break;
2711 case IOR:
2712 if (trueop1 == CONST0_RTX (mode))
2713 return op0;
2714 if (INTEGRAL_MODE_P (mode)
2715 && trueop1 == CONSTM1_RTX (mode)
2716 && !side_effects_p (op0))
2717 return op1;
2718 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2719 return op0;
2720 /* A | (~A) -> -1 */
2721 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2722 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2723 && ! side_effects_p (op0)
2724 && SCALAR_INT_MODE_P (mode))
2725 return constm1_rtx;
2727 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2728 if (CONST_INT_P (op1)
2729 && HWI_COMPUTABLE_MODE_P (mode)
2730 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2731 && !side_effects_p (op0))
2732 return op1;
2734 /* Canonicalize (X & C1) | C2. */
2735 if (GET_CODE (op0) == AND
2736 && CONST_INT_P (trueop1)
2737 && CONST_INT_P (XEXP (op0, 1)))
2739 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2740 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2741 HOST_WIDE_INT c2 = INTVAL (trueop1);
2743 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2744 if ((c1 & c2) == c1
2745 && !side_effects_p (XEXP (op0, 0)))
2746 return trueop1;
2748 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2749 if (((c1|c2) & mask) == mask)
2750 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2752 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2753 if (((c1 & ~c2) & mask) != (c1 & mask))
2755 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2756 gen_int_mode (c1 & ~c2, mode));
2757 return simplify_gen_binary (IOR, mode, tem, op1);
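 /* Worked example (illustration): (ior (and x (const_int 0x0f))
 (const_int 0x06)) becomes (ior (and x (const_int 0x09)) (const_int 0x06)):
 the bits of C1 already covered by C2 are dropped from the AND mask.  */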
2761 /* Convert (A & B) | A to A. */
2762 if (GET_CODE (op0) == AND
2763 && (rtx_equal_p (XEXP (op0, 0), op1)
2764 || rtx_equal_p (XEXP (op0, 1), op1))
2765 && ! side_effects_p (XEXP (op0, 0))
2766 && ! side_effects_p (XEXP (op0, 1)))
2767 return op1;
2769 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2770 mode size to (rotate A CX). */
2772 if (GET_CODE (op1) == ASHIFT
2773 || GET_CODE (op1) == SUBREG)
2775 opleft = op1;
2776 opright = op0;
2778 else
2780 opright = op1;
2781 opleft = op0;
2784 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2785 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2786 && CONST_INT_P (XEXP (opleft, 1))
2787 && CONST_INT_P (XEXP (opright, 1))
2788 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2789 == GET_MODE_PRECISION (mode)))
2790 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
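 /* E.g. in SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
 becomes (rotate x (const_int 24)), since 24 + 8 equals the mode
 precision.  */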
2792 /* Same, but for ashift that has been "simplified" to a wider mode
2793 by simplify_shift_const. */
2795 if (GET_CODE (opleft) == SUBREG
2796 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2797 && GET_CODE (opright) == LSHIFTRT
2798 && GET_CODE (XEXP (opright, 0)) == SUBREG
2799 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2800 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2801 && (GET_MODE_SIZE (GET_MODE (opleft))
2802 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2803 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2804 SUBREG_REG (XEXP (opright, 0)))
2805 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2806 && CONST_INT_P (XEXP (opright, 1))
2807 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2808 == GET_MODE_PRECISION (mode)))
2809 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2810 XEXP (SUBREG_REG (opleft), 1));
2812 /* If we have (ior (and X C1) C2), simplify this by making
2813 C1 as small as possible if C1 actually changes. */
2814 if (CONST_INT_P (op1)
2815 && (HWI_COMPUTABLE_MODE_P (mode)
2816 || INTVAL (op1) > 0)
2817 && GET_CODE (op0) == AND
2818 && CONST_INT_P (XEXP (op0, 1))
2819 && CONST_INT_P (op1)
2820 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2821 return simplify_gen_binary (IOR, mode,
2822 simplify_gen_binary
2823 (AND, mode, XEXP (op0, 0),
2824 GEN_INT (UINTVAL (XEXP (op0, 1))
2825 & ~UINTVAL (op1))),
2826 op1);
2828 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2829 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2830 the PLUS does not affect any of the bits in OP1: then we can do
2831 the IOR as a PLUS and we can associate. This is valid if OP1
2832 can be safely shifted left C bits. */
2833 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2834 && GET_CODE (XEXP (op0, 0)) == PLUS
2835 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2836 && CONST_INT_P (XEXP (op0, 1))
2837 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2839 int count = INTVAL (XEXP (op0, 1));
2840 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2842 if (mask >> count == INTVAL (trueop1)
2843 && trunc_int_for_mode (mask, mode) == mask
2844 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2845 return simplify_gen_binary (ASHIFTRT, mode,
2846 plus_constant (mode, XEXP (op0, 0),
2847 mask),
2848 XEXP (op0, 1));
2851 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2852 if (tem)
2853 return tem;
2855 tem = simplify_associative_operation (code, mode, op0, op1);
2856 if (tem)
2857 return tem;
2858 break;
2860 case XOR:
2861 if (trueop1 == CONST0_RTX (mode))
2862 return op0;
2863 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2864 return simplify_gen_unary (NOT, mode, op0, mode);
2865 if (rtx_equal_p (trueop0, trueop1)
2866 && ! side_effects_p (op0)
2867 && GET_MODE_CLASS (mode) != MODE_CC)
2868 return CONST0_RTX (mode);
2870 /* Canonicalize XOR of the most significant bit to PLUS. */
2871 if (CONST_SCALAR_INT_P (op1)
2872 && mode_signbit_p (mode, op1))
2873 return simplify_gen_binary (PLUS, mode, op0, op1);
2874 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2875 if (CONST_SCALAR_INT_P (op1)
2876 && GET_CODE (op0) == PLUS
2877 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2878 && mode_signbit_p (mode, XEXP (op0, 1)))
2879 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2880 simplify_gen_binary (XOR, mode, op1,
2881 XEXP (op0, 1)));
2883 /* If we are XORing two things that have no bits in common,
2884 convert them into an IOR. This helps to detect rotation encoded
2885 using those methods and possibly other simplifications. */
2887 if (HWI_COMPUTABLE_MODE_P (mode)
2888 && (nonzero_bits (op0, mode)
2889 & nonzero_bits (op1, mode)) == 0)
2890 return (simplify_gen_binary (IOR, mode, op0, op1));
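 /* E.g. (xor (and x (const_int 0xf0)) (and y (const_int 0x0f))) has disjoint
 nonzero bits and so becomes the corresponding IOR, which later patterns
 (such as the rotate detection in the IOR case above) can then pick up.  */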
2892 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2893 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2894 (NOT y). */
2896 int num_negated = 0;
2898 if (GET_CODE (op0) == NOT)
2899 num_negated++, op0 = XEXP (op0, 0);
2900 if (GET_CODE (op1) == NOT)
2901 num_negated++, op1 = XEXP (op1, 0);
2903 if (num_negated == 2)
2904 return simplify_gen_binary (XOR, mode, op0, op1);
2905 else if (num_negated == 1)
2906 return simplify_gen_unary (NOT, mode,
2907 simplify_gen_binary (XOR, mode, op0, op1),
2908 mode);
2911 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2912 correspond to a machine insn or result in further simplifications
2913 if B is a constant. */
2915 if (GET_CODE (op0) == AND
2916 && rtx_equal_p (XEXP (op0, 1), op1)
2917 && ! side_effects_p (op1))
2918 return simplify_gen_binary (AND, mode,
2919 simplify_gen_unary (NOT, mode,
2920 XEXP (op0, 0), mode),
2921 op1);
2923 else if (GET_CODE (op0) == AND
2924 && rtx_equal_p (XEXP (op0, 0), op1)
2925 && ! side_effects_p (op1))
2926 return simplify_gen_binary (AND, mode,
2927 simplify_gen_unary (NOT, mode,
2928 XEXP (op0, 1), mode),
2929 op1);
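 /* The equivalence holds because every bit of B that is set in (A & B) is
 cleared by the XOR with B, while the bits of B not covered by A survive;
 e.g. in four bits, A = 0b1100, B = 0b1010: (A & B) ^ B = 0b1000 ^ 0b1010
 = 0b0010 = ~A & B.  */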
2931 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2932 we can transform like this:
2933 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2934 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2935 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2936 Attempt a few simplifications when B and C are both constants. */
2937 if (GET_CODE (op0) == AND
2938 && CONST_INT_P (op1)
2939 && CONST_INT_P (XEXP (op0, 1)))
2941 rtx a = XEXP (op0, 0);
2942 rtx b = XEXP (op0, 1);
2943 rtx c = op1;
2944 HOST_WIDE_INT bval = INTVAL (b);
2945 HOST_WIDE_INT cval = INTVAL (c);
2947 rtx na_c
2948 = simplify_binary_operation (AND, mode,
2949 simplify_gen_unary (NOT, mode, a, mode),
2951 if ((~cval & bval) == 0)
2953 /* Try to simplify ~A&C | ~B&C. */
2954 if (na_c != NULL_RTX)
2955 return simplify_gen_binary (IOR, mode, na_c,
2956 GEN_INT (~bval & cval));
2958 else
2960 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2961 if (na_c == const0_rtx)
2963 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2964 GEN_INT (~cval & bval));
2965 return simplify_gen_binary (IOR, mode, a_nc_b,
2966 GEN_INT (~bval & cval));
2971 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2972 comparison if STORE_FLAG_VALUE is 1. */
2973 if (STORE_FLAG_VALUE == 1
2974 && trueop1 == const1_rtx
2975 && COMPARISON_P (op0)
2976 && (reversed = reversed_comparison (op0, mode)))
2977 return reversed;
2979 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2980 is (lt foo (const_int 0)), so we can perform the above
2981 simplification if STORE_FLAG_VALUE is 1. */
2983 if (STORE_FLAG_VALUE == 1
2984 && trueop1 == const1_rtx
2985 && GET_CODE (op0) == LSHIFTRT
2986 && CONST_INT_P (XEXP (op0, 1))
2987 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2988 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2990 /* (xor (comparison foo bar) (const_int sign-bit))
2991 when STORE_FLAG_VALUE is the sign bit. */
2992 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2993 && trueop1 == const_true_rtx
2994 && COMPARISON_P (op0)
2995 && (reversed = reversed_comparison (op0, mode)))
2996 return reversed;
2998 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2999 if (tem)
3000 return tem;
3002 tem = simplify_associative_operation (code, mode, op0, op1);
3003 if (tem)
3004 return tem;
3005 break;
3007 case AND:
3008 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3009 return trueop1;
3010 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3011 return op0;
3012 if (HWI_COMPUTABLE_MODE_P (mode))
3014 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3015 HOST_WIDE_INT nzop1;
3016 if (CONST_INT_P (trueop1))
3018 HOST_WIDE_INT val1 = INTVAL (trueop1);
3019 /* If we are turning off bits already known off in OP0, we need
3020 not do an AND. */
3021 if ((nzop0 & ~val1) == 0)
3022 return op0;
3024 nzop1 = nonzero_bits (trueop1, mode);
3025 /* If we are clearing all the nonzero bits, the result is zero. */
3026 if ((nzop1 & nzop0) == 0
3027 && !side_effects_p (op0) && !side_effects_p (op1))
3028 return CONST0_RTX (mode);
3030 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3031 && GET_MODE_CLASS (mode) != MODE_CC)
3032 return op0;
3033 /* A & (~A) -> 0 */
3034 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3035 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3036 && ! side_effects_p (op0)
3037 && GET_MODE_CLASS (mode) != MODE_CC)
3038 return CONST0_RTX (mode);
3040 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3041 there are no nonzero bits of C outside of X's mode. */
3042 if ((GET_CODE (op0) == SIGN_EXTEND
3043 || GET_CODE (op0) == ZERO_EXTEND)
3044 && CONST_INT_P (trueop1)
3045 && HWI_COMPUTABLE_MODE_P (mode)
3046 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3047 & UINTVAL (trueop1)) == 0)
3049 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3050 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3051 gen_int_mode (INTVAL (trueop1),
3052 imode));
3053 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3056 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3057 we might be able to further simplify the AND with X and potentially
3058 remove the truncation altogether. */
3059 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3061 rtx x = XEXP (op0, 0);
3062 enum machine_mode xmode = GET_MODE (x);
3063 tem = simplify_gen_binary (AND, xmode, x,
3064 gen_int_mode (INTVAL (trueop1), xmode));
3065 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3068 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3069 if (GET_CODE (op0) == IOR
3070 && CONST_INT_P (trueop1)
3071 && CONST_INT_P (XEXP (op0, 1)))
3073 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3074 return simplify_gen_binary (IOR, mode,
3075 simplify_gen_binary (AND, mode,
3076 XEXP (op0, 0), op1),
3077 gen_int_mode (tmp, mode));
3080 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3081 insn (and may simplify more). */
3082 if (GET_CODE (op0) == XOR
3083 && rtx_equal_p (XEXP (op0, 0), op1)
3084 && ! side_effects_p (op1))
3085 return simplify_gen_binary (AND, mode,
3086 simplify_gen_unary (NOT, mode,
3087 XEXP (op0, 1), mode),
3088 op1);
3090 if (GET_CODE (op0) == XOR
3091 && rtx_equal_p (XEXP (op0, 1), op1)
3092 && ! side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode,
3094 simplify_gen_unary (NOT, mode,
3095 XEXP (op0, 0), mode),
3096 op1);
3098 /* Similarly for (~(A ^ B)) & A. */
3099 if (GET_CODE (op0) == NOT
3100 && GET_CODE (XEXP (op0, 0)) == XOR
3101 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3102 && ! side_effects_p (op1))
3103 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3105 if (GET_CODE (op0) == NOT
3106 && GET_CODE (XEXP (op0, 0)) == XOR
3107 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3108 && ! side_effects_p (op1))
3109 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3111 /* Convert (A | B) & A to A. */
3112 if (GET_CODE (op0) == IOR
3113 && (rtx_equal_p (XEXP (op0, 0), op1)
3114 || rtx_equal_p (XEXP (op0, 1), op1))
3115 && ! side_effects_p (XEXP (op0, 0))
3116 && ! side_effects_p (XEXP (op0, 1)))
3117 return op1;
3119 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3120 ((A & N) + B) & M -> (A + B) & M
3121 Similarly if (N & M) == 0,
3122 ((A | N) + B) & M -> (A + B) & M
3123 and for - instead of + and/or ^ instead of |.
3124 Also, if (N & M) == 0, then
3125 (A +- N) & M -> A & M. */
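 /* Worked example (illustration): with M = 0xff (cst = 8) and N = 0x1ff,
 ((A & 0x1ff) + B) & 0xff equals (A + B) & 0xff, because the low 8 bits of
 the sum depend only on the low 8 bits of the addends, and A & 0x1ff agrees
 with A on those bits.  */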
3126 if (CONST_INT_P (trueop1)
3127 && HWI_COMPUTABLE_MODE_P (mode)
3128 && ~UINTVAL (trueop1)
3129 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3130 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3132 rtx pmop[2];
3133 int which;
3135 pmop[0] = XEXP (op0, 0);
3136 pmop[1] = XEXP (op0, 1);
3138 if (CONST_INT_P (pmop[1])
3139 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3140 return simplify_gen_binary (AND, mode, pmop[0], op1);
3142 for (which = 0; which < 2; which++)
3144 tem = pmop[which];
3145 switch (GET_CODE (tem))
3147 case AND:
3148 if (CONST_INT_P (XEXP (tem, 1))
3149 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3150 == UINTVAL (trueop1))
3151 pmop[which] = XEXP (tem, 0);
3152 break;
3153 case IOR:
3154 case XOR:
3155 if (CONST_INT_P (XEXP (tem, 1))
3156 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3157 pmop[which] = XEXP (tem, 0);
3158 break;
3159 default:
3160 break;
3164 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3166 tem = simplify_gen_binary (GET_CODE (op0), mode,
3167 pmop[0], pmop[1]);
3168 return simplify_gen_binary (code, mode, tem, op1);
3172 /* (and X (ior (not X) Y)) -> (and X Y) */
3173 if (GET_CODE (op1) == IOR
3174 && GET_CODE (XEXP (op1, 0)) == NOT
3175 && op0 == XEXP (XEXP (op1, 0), 0))
3176 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3178 /* (and (ior (not X) Y) X) -> (and X Y) */
3179 if (GET_CODE (op0) == IOR
3180 && GET_CODE (XEXP (op0, 0)) == NOT
3181 && op1 == XEXP (XEXP (op0, 0), 0))
3182 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3184 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3185 if (tem)
3186 return tem;
3188 tem = simplify_associative_operation (code, mode, op0, op1);
3189 if (tem)
3190 return tem;
3191 break;
3193 case UDIV:
3194 /* 0/x is 0 (or x&0 if x has side-effects). */
3195 if (trueop0 == CONST0_RTX (mode))
3197 if (side_effects_p (op1))
3198 return simplify_gen_binary (AND, mode, op1, trueop0);
3199 return trueop0;
3201 /* x/1 is x. */
3202 if (trueop1 == CONST1_RTX (mode))
3204 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3205 if (tem)
3206 return tem;
3208 /* Convert divide by power of two into shift. */
3209 if (CONST_INT_P (trueop1)
3210 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3211 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3212 break;
3214 case DIV:
3215 /* Handle floating point and integers separately. */
3216 if (SCALAR_FLOAT_MODE_P (mode))
3218 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3219 safe for modes with NaNs, since 0.0 / 0.0 will then be
3220 NaN rather than 0.0. Nor is it safe for modes with signed
3221 zeros, since dividing 0 by a negative number gives -0.0. */
3222 if (trueop0 == CONST0_RTX (mode)
3223 && !HONOR_NANS (mode)
3224 && !HONOR_SIGNED_ZEROS (mode)
3225 && ! side_effects_p (op1))
3226 return op0;
3227 /* x/1.0 is x. */
3228 if (trueop1 == CONST1_RTX (mode)
3229 && !HONOR_SNANS (mode))
3230 return op0;
3232 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3233 && trueop1 != CONST0_RTX (mode))
3235 REAL_VALUE_TYPE d;
3236 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3238 /* x/-1.0 is -x. */
3239 if (REAL_VALUES_EQUAL (d, dconstm1)
3240 && !HONOR_SNANS (mode))
3241 return simplify_gen_unary (NEG, mode, op0, mode);
3243 /* Change FP division by a constant into multiplication.
3244 Only do this with -freciprocal-math. */
3245 if (flag_reciprocal_math
3246 && !REAL_VALUES_EQUAL (d, dconst0))
3248 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3249 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3250 return simplify_gen_binary (MULT, mode, op0, tem);
3254 else if (SCALAR_INT_MODE_P (mode))
3256 /* 0/x is 0 (or x&0 if x has side-effects). */
3257 if (trueop0 == CONST0_RTX (mode)
3258 && !cfun->can_throw_non_call_exceptions)
3260 if (side_effects_p (op1))
3261 return simplify_gen_binary (AND, mode, op1, trueop0);
3262 return trueop0;
3264 /* x/1 is x. */
3265 if (trueop1 == CONST1_RTX (mode))
3267 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3268 if (tem)
3269 return tem;
3271 /* x/-1 is -x. */
3272 if (trueop1 == constm1_rtx)
3274 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3275 if (x)
3276 return simplify_gen_unary (NEG, mode, x, mode);
3279 break;
3281 case UMOD:
3282 /* 0%x is 0 (or x&0 if x has side-effects). */
3283 if (trueop0 == CONST0_RTX (mode))
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3289 /* x%1 is 0 (or x&0 if x has side-effects). */
3290 if (trueop1 == CONST1_RTX (mode))
3292 if (side_effects_p (op0))
3293 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3294 return CONST0_RTX (mode);
3296 /* Implement modulus by power of two as AND. */
3297 if (CONST_INT_P (trueop1)
3298 && exact_log2 (UINTVAL (trueop1)) > 0)
3299 return simplify_gen_binary (AND, mode, op0,
3300 GEN_INT (INTVAL (op1) - 1));
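 /* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)).  */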
3301 break;
3303 case MOD:
3304 /* 0%x is 0 (or x&0 if x has side-effects). */
3305 if (trueop0 == CONST0_RTX (mode))
3307 if (side_effects_p (op1))
3308 return simplify_gen_binary (AND, mode, op1, trueop0);
3309 return trueop0;
3311 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3312 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3314 if (side_effects_p (op0))
3315 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3316 return CONST0_RTX (mode);
3318 break;
3320 case ROTATERT:
3321 case ROTATE:
3322 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3323 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3324 bitsize - 1, rotate in the other direction by bitsize - op1, i.e. by
3325 an amount in the range 1 .. bitsize / 2 - 1, instead. */
3326 if (CONST_INT_P (trueop1)
3327 && IN_RANGE (INTVAL (trueop1),
3328 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3329 GET_MODE_BITSIZE (mode) - 1))
3330 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3331 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3332 - INTVAL (trueop1)));
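 /* E.g. in SImode, (rotate x (const_int 24)) becomes
 (rotatert x (const_int 8)), and (rotatert x (const_int 20)) becomes
 (rotate x (const_int 12)).  */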
3333 /* FALLTHRU */
3334 case ASHIFTRT:
3335 if (trueop1 == CONST0_RTX (mode))
3336 return op0;
3337 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3338 return op0;
3339 /* Rotating ~0 always results in ~0. */
3340 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3341 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3342 && ! side_effects_p (op1))
3343 return op0;
3344 canonicalize_shift:
3345 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3347 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3348 if (val != INTVAL (op1))
3349 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3351 break;
3353 case ASHIFT:
3354 case SS_ASHIFT:
3355 case US_ASHIFT:
3356 if (trueop1 == CONST0_RTX (mode))
3357 return op0;
3358 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3359 return op0;
3360 goto canonicalize_shift;
3362 case LSHIFTRT:
3363 if (trueop1 == CONST0_RTX (mode))
3364 return op0;
3365 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3366 return op0;
3367 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3368 if (GET_CODE (op0) == CLZ
3369 && CONST_INT_P (trueop1)
3370 && STORE_FLAG_VALUE == 1
3371 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3373 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3374 unsigned HOST_WIDE_INT zero_val = 0;
3376 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3377 && zero_val == GET_MODE_PRECISION (imode)
3378 && INTVAL (trueop1) == exact_log2 (zero_val))
3379 return simplify_gen_relational (EQ, mode, imode,
3380 XEXP (op0, 0), const0_rtx);
3382 goto canonicalize_shift;
3384 case SMIN:
3385 if (width <= HOST_BITS_PER_WIDE_INT
3386 && mode_signbit_p (mode, trueop1)
3387 && ! side_effects_p (op0))
3388 return op1;
3389 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3390 return op0;
3391 tem = simplify_associative_operation (code, mode, op0, op1);
3392 if (tem)
3393 return tem;
3394 break;
3396 case SMAX:
3397 if (width <= HOST_BITS_PER_WIDE_INT
3398 && CONST_INT_P (trueop1)
3399 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3400 && ! side_effects_p (op0))
3401 return op1;
3402 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3403 return op0;
3404 tem = simplify_associative_operation (code, mode, op0, op1);
3405 if (tem)
3406 return tem;
3407 break;
3409 case UMIN:
3410 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3411 return op1;
3412 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3413 return op0;
3414 tem = simplify_associative_operation (code, mode, op0, op1);
3415 if (tem)
3416 return tem;
3417 break;
3419 case UMAX:
3420 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3421 return op1;
3422 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3423 return op0;
3424 tem = simplify_associative_operation (code, mode, op0, op1);
3425 if (tem)
3426 return tem;
3427 break;
3429 case SS_PLUS:
3430 case US_PLUS:
3431 case SS_MINUS:
3432 case US_MINUS:
3433 case SS_MULT:
3434 case US_MULT:
3435 case SS_DIV:
3436 case US_DIV:
3437 /* ??? There are simplifications that can be done. */
3438 return 0;
3440 case VEC_SELECT:
3441 if (!VECTOR_MODE_P (mode))
3443 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3444 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3445 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3446 gcc_assert (XVECLEN (trueop1, 0) == 1);
3447 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3449 if (GET_CODE (trueop0) == CONST_VECTOR)
3450 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3451 (trueop1, 0, 0)));
3453 /* Extract a scalar element from a nested VEC_SELECT expression
3454 (with optional nested VEC_CONCAT expression). Some targets
3455 (i386) extract a scalar element from a vector using a chain of
3456 nested VEC_SELECT expressions. When the input operand is a memory
3457 operand, this operation can be simplified to a simple scalar
3458 load from an offset memory address. */
3459 if (GET_CODE (trueop0) == VEC_SELECT)
3461 rtx op0 = XEXP (trueop0, 0);
3462 rtx op1 = XEXP (trueop0, 1);
3464 enum machine_mode opmode = GET_MODE (op0);
3465 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3466 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3468 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3469 int elem;
3471 rtvec vec;
3472 rtx tmp_op, tmp;
3474 gcc_assert (GET_CODE (op1) == PARALLEL);
3475 gcc_assert (i < n_elts);
3477 /* Select the element pointed to by the nested selector. */
3478 elem = INTVAL (XVECEXP (op1, 0, i));
3480 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3481 if (GET_CODE (op0) == VEC_CONCAT)
3483 rtx op00 = XEXP (op0, 0);
3484 rtx op01 = XEXP (op0, 1);
3486 enum machine_mode mode00, mode01;
3487 int n_elts00, n_elts01;
3489 mode00 = GET_MODE (op00);
3490 mode01 = GET_MODE (op01);
3492 /* Find out the number of elements in each operand. */
3493 if (VECTOR_MODE_P (mode00))
3495 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3496 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3498 else
3499 n_elts00 = 1;
3501 if (VECTOR_MODE_P (mode01))
3503 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3504 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3506 else
3507 n_elts01 = 1;
3509 gcc_assert (n_elts == n_elts00 + n_elts01);
3511 /* Select correct operand of VEC_CONCAT
3512 and adjust selector. */
3513 if (elem < n_elts01)
3514 tmp_op = op00;
3515 else
3517 tmp_op = op01;
3518 elem -= n_elts00;
3521 else
3522 tmp_op = op0;
3524 vec = rtvec_alloc (1);
3525 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3527 tmp = gen_rtx_fmt_ee (code, mode,
3528 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3529 return tmp;
3531 if (GET_CODE (trueop0) == VEC_DUPLICATE
3532 && GET_MODE (XEXP (trueop0, 0)) == mode)
3533 return XEXP (trueop0, 0);
3535 else
3537 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3538 gcc_assert (GET_MODE_INNER (mode)
3539 == GET_MODE_INNER (GET_MODE (trueop0)));
3540 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3542 if (GET_CODE (trueop0) == CONST_VECTOR)
3544 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3545 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3546 rtvec v = rtvec_alloc (n_elts);
3547 unsigned int i;
3549 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3550 for (i = 0; i < n_elts; i++)
3552 rtx x = XVECEXP (trueop1, 0, i);
3554 gcc_assert (CONST_INT_P (x));
3555 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3556 INTVAL (x));
3559 return gen_rtx_CONST_VECTOR (mode, v);
3562 /* Recognize the identity. */
3563 if (GET_MODE (trueop0) == mode)
3565 bool maybe_ident = true;
3566 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3568 rtx j = XVECEXP (trueop1, 0, i);
3569 if (!CONST_INT_P (j) || INTVAL (j) != i)
3571 maybe_ident = false;
3572 break;
3575 if (maybe_ident)
3576 return trueop0;
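 /* E.g. (vec_select:V4SI x (parallel [(const_int 0) (const_int 1)
 (const_int 2) (const_int 3)])) is simply x.  */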
3579 /* If we build {a,b} then permute it, build the result directly. */
3580 if (XVECLEN (trueop1, 0) == 2
3581 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3582 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3583 && GET_CODE (trueop0) == VEC_CONCAT
3584 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3585 && GET_MODE (XEXP (trueop0, 0)) == mode
3586 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3587 && GET_MODE (XEXP (trueop0, 1)) == mode)
3589 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3590 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3591 rtx subop0, subop1;
3593 gcc_assert (i0 < 4 && i1 < 4);
3594 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3595 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3597 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3600 if (XVECLEN (trueop1, 0) == 2
3601 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3602 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3603 && GET_CODE (trueop0) == VEC_CONCAT
3604 && GET_MODE (trueop0) == mode)
3606 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3607 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3608 rtx subop0, subop1;
3610 gcc_assert (i0 < 2 && i1 < 2);
3611 subop0 = XEXP (trueop0, i0);
3612 subop1 = XEXP (trueop0, i1);
3614 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3618 if (XVECLEN (trueop1, 0) == 1
3619 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3620 && GET_CODE (trueop0) == VEC_CONCAT)
3622 rtx vec = trueop0;
3623 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3625 /* Try to find the element in the VEC_CONCAT. */
3626 while (GET_MODE (vec) != mode
3627 && GET_CODE (vec) == VEC_CONCAT)
3629 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3630 if (offset < vec_size)
3631 vec = XEXP (vec, 0);
3632 else
3634 offset -= vec_size;
3635 vec = XEXP (vec, 1);
3637 vec = avoid_constant_pool_reference (vec);
3640 if (GET_MODE (vec) == mode)
3641 return vec;
3644 /* If we select elements in a vec_merge that all come from the same
3645 operand, select from that operand directly. */
3646 if (GET_CODE (op0) == VEC_MERGE)
3648 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3649 if (CONST_INT_P (trueop02))
3651 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3652 bool all_operand0 = true;
3653 bool all_operand1 = true;
3654 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3656 rtx j = XVECEXP (trueop1, 0, i);
3657 if (sel & (1 << UINTVAL (j)))
3658 all_operand1 = false;
3659 else
3660 all_operand0 = false;
3662 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3663 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3664 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3665 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3669 return 0;
3670 case VEC_CONCAT:
3672 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3673 ? GET_MODE (trueop0)
3674 : GET_MODE_INNER (mode));
3675 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3676 ? GET_MODE (trueop1)
3677 : GET_MODE_INNER (mode));
3679 gcc_assert (VECTOR_MODE_P (mode));
3680 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3681 == GET_MODE_SIZE (mode));
3683 if (VECTOR_MODE_P (op0_mode))
3684 gcc_assert (GET_MODE_INNER (mode)
3685 == GET_MODE_INNER (op0_mode));
3686 else
3687 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3689 if (VECTOR_MODE_P (op1_mode))
3690 gcc_assert (GET_MODE_INNER (mode)
3691 == GET_MODE_INNER (op1_mode));
3692 else
3693 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3695 if ((GET_CODE (trueop0) == CONST_VECTOR
3696 || CONST_SCALAR_INT_P (trueop0)
3697 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3698 && (GET_CODE (trueop1) == CONST_VECTOR
3699 || CONST_SCALAR_INT_P (trueop1)
3700 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3702 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3703 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3704 rtvec v = rtvec_alloc (n_elts);
3705 unsigned int i;
3706 unsigned in_n_elts = 1;
3708 if (VECTOR_MODE_P (op0_mode))
3709 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3710 for (i = 0; i < n_elts; i++)
3712 if (i < in_n_elts)
3714 if (!VECTOR_MODE_P (op0_mode))
3715 RTVEC_ELT (v, i) = trueop0;
3716 else
3717 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3719 else
3721 if (!VECTOR_MODE_P (op1_mode))
3722 RTVEC_ELT (v, i) = trueop1;
3723 else
3724 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3725 i - in_n_elts);
3729 return gen_rtx_CONST_VECTOR (mode, v);
3732 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3733 Restrict the transformation to avoid generating a VEC_SELECT with a
3734 mode unrelated to its operand. */
3735 if (GET_CODE (trueop0) == VEC_SELECT
3736 && GET_CODE (trueop1) == VEC_SELECT
3737 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3738 && GET_MODE (XEXP (trueop0, 0)) == mode)
3740 rtx par0 = XEXP (trueop0, 1);
3741 rtx par1 = XEXP (trueop1, 1);
3742 int len0 = XVECLEN (par0, 0);
3743 int len1 = XVECLEN (par1, 0);
3744 rtvec vec = rtvec_alloc (len0 + len1);
3745 for (int i = 0; i < len0; i++)
3746 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3747 for (int i = 0; i < len1; i++)
3748 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3749 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3750 gen_rtx_PARALLEL (VOIDmode, vec));
3753 return 0;
3755 default:
3756 gcc_unreachable ();
3759 return 0;
3763 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3764 rtx op0, rtx op1)
3766 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3767 HOST_WIDE_INT val;
3768 unsigned int width = GET_MODE_PRECISION (mode);
3770 if (VECTOR_MODE_P (mode)
3771 && code != VEC_CONCAT
3772 && GET_CODE (op0) == CONST_VECTOR
3773 && GET_CODE (op1) == CONST_VECTOR)
3775 unsigned n_elts = GET_MODE_NUNITS (mode);
3776 enum machine_mode op0mode = GET_MODE (op0);
3777 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3778 enum machine_mode op1mode = GET_MODE (op1);
3779 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3780 rtvec v = rtvec_alloc (n_elts);
3781 unsigned int i;
3783 gcc_assert (op0_n_elts == n_elts);
3784 gcc_assert (op1_n_elts == n_elts);
3785 for (i = 0; i < n_elts; i++)
3787 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3788 CONST_VECTOR_ELT (op0, i),
3789 CONST_VECTOR_ELT (op1, i));
3790 if (!x)
3791 return 0;
3792 RTVEC_ELT (v, i) = x;
3795 return gen_rtx_CONST_VECTOR (mode, v);
3798 if (VECTOR_MODE_P (mode)
3799 && code == VEC_CONCAT
3800 && (CONST_SCALAR_INT_P (op0)
3801 || GET_CODE (op0) == CONST_FIXED
3802 || CONST_DOUBLE_AS_FLOAT_P (op0))
3803 && (CONST_SCALAR_INT_P (op1)
3804 || CONST_DOUBLE_AS_FLOAT_P (op1)
3805 || GET_CODE (op1) == CONST_FIXED))
3807 unsigned n_elts = GET_MODE_NUNITS (mode);
3808 rtvec v = rtvec_alloc (n_elts);
3810 gcc_assert (n_elts >= 2);
3811 if (n_elts == 2)
3813 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3814 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3816 RTVEC_ELT (v, 0) = op0;
3817 RTVEC_ELT (v, 1) = op1;
3819 else
3821 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3822 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3823 unsigned i;
3825 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3826 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3827 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3829 for (i = 0; i < op0_n_elts; ++i)
3830 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3831 for (i = 0; i < op1_n_elts; ++i)
3832 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3835 return gen_rtx_CONST_VECTOR (mode, v);
3838 if (SCALAR_FLOAT_MODE_P (mode)
3839 && CONST_DOUBLE_AS_FLOAT_P (op0)
3840 && CONST_DOUBLE_AS_FLOAT_P (op1)
3841 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3843 if (code == AND
3844 || code == IOR
3845 || code == XOR)
3847 long tmp0[4];
3848 long tmp1[4];
3849 REAL_VALUE_TYPE r;
3850 int i;
3852 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3853 GET_MODE (op0));
3854 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3855 GET_MODE (op1));
3856 for (i = 0; i < 4; i++)
3858 switch (code)
3860 case AND:
3861 tmp0[i] &= tmp1[i];
3862 break;
3863 case IOR:
3864 tmp0[i] |= tmp1[i];
3865 break;
3866 case XOR:
3867 tmp0[i] ^= tmp1[i];
3868 break;
3869 default:
3870 gcc_unreachable ();
3873 real_from_target (&r, tmp0, mode);
3874 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3876 else
3878 REAL_VALUE_TYPE f0, f1, value, result;
3879 bool inexact;
3881 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3882 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3883 real_convert (&f0, mode, &f0);
3884 real_convert (&f1, mode, &f1);
3886 if (HONOR_SNANS (mode)
3887 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3888 return 0;
3890 if (code == DIV
3891 && REAL_VALUES_EQUAL (f1, dconst0)
3892 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3893 return 0;
3895 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3896 && flag_trapping_math
3897 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3899 int s0 = REAL_VALUE_NEGATIVE (f0);
3900 int s1 = REAL_VALUE_NEGATIVE (f1);
3902 switch (code)
3904 case PLUS:
3905 /* Inf + -Inf = NaN plus exception. */
3906 if (s0 != s1)
3907 return 0;
3908 break;
3909 case MINUS:
3910 /* Inf - Inf = NaN plus exception. */
3911 if (s0 == s1)
3912 return 0;
3913 break;
3914 case DIV:
3915 /* Inf / Inf = NaN plus exception. */
3916 return 0;
3917 default:
3918 break;
3922 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3923 && flag_trapping_math
3924 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3925 || (REAL_VALUE_ISINF (f1)
3926 && REAL_VALUES_EQUAL (f0, dconst0))))
3927 /* Inf * 0 = NaN plus exception. */
3928 return 0;
3930 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3931 &f0, &f1);
3932 real_convert (&result, mode, &value);
3934 /* Don't constant fold this floating point operation if
3935 the result has overflowed and flag_trapping_math is set. */
3937 if (flag_trapping_math
3938 && MODE_HAS_INFINITIES (mode)
3939 && REAL_VALUE_ISINF (result)
3940 && !REAL_VALUE_ISINF (f0)
3941 && !REAL_VALUE_ISINF (f1))
3942 /* Overflow plus exception. */
3943 return 0;
3945 /* Don't constant fold this floating point operation if the
3946 result may depend upon the run-time rounding mode and
3947 flag_rounding_math is set, or if GCC's software emulation
3948 is unable to accurately represent the result. */
3950 if ((flag_rounding_math
3951 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3952 && (inexact || !real_identical (&result, &value)))
3953 return NULL_RTX;
3955 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3959 /* We can fold some multi-word operations. */
3960 if (GET_MODE_CLASS (mode) == MODE_INT
3961 && width == HOST_BITS_PER_DOUBLE_INT
3962 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3963 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3965 double_int o0, o1, res, tmp;
3966 bool overflow;
3968 o0 = rtx_to_double_int (op0);
3969 o1 = rtx_to_double_int (op1);
3971 switch (code)
3973 case MINUS:
3974 /* A - B == A + (-B). */
3975 o1 = -o1;
3977 /* Fall through.... */
3979 case PLUS:
3980 res = o0 + o1;
3981 break;
3983 case MULT:
3984 res = o0 * o1;
3985 break;
3987 case DIV:
3988 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3989 &tmp, &overflow);
3990 if (overflow)
3991 return 0;
3992 break;
3994 case MOD:
3995 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3996 &res, &overflow);
3997 if (overflow)
3998 return 0;
3999 break;
4001 case UDIV:
4002 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4003 &tmp, &overflow);
4004 if (overflow)
4005 return 0;
4006 break;
4008 case UMOD:
4009 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4010 &res, &overflow);
4011 if (overflow)
4012 return 0;
4013 break;
4015 case AND:
4016 res = o0 & o1;
4017 break;
4019 case IOR:
4020 res = o0 | o1;
4021 break;
4023 case XOR:
4024 res = o0 ^ o1;
4025 break;
4027 case SMIN:
4028 res = o0.smin (o1);
4029 break;
4031 case SMAX:
4032 res = o0.smax (o1);
4033 break;
4035 case UMIN:
4036 res = o0.umin (o1);
4037 break;
4039 case UMAX:
4040 res = o0.umax (o1);
4041 break;
4043 case LSHIFTRT: case ASHIFTRT:
4044 case ASHIFT:
4045 case ROTATE: case ROTATERT:
4047 unsigned HOST_WIDE_INT cnt;
4049 if (SHIFT_COUNT_TRUNCATED)
4051 o1.high = 0;
4052 o1.low &= GET_MODE_PRECISION (mode) - 1;
4055 if (!o1.fits_uhwi ()
4056 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4057 return 0;
4059 cnt = o1.to_uhwi ();
4060 unsigned short prec = GET_MODE_PRECISION (mode);
4062 if (code == LSHIFTRT || code == ASHIFTRT)
4063 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4064 else if (code == ASHIFT)
4065 res = o0.alshift (cnt, prec);
4066 else if (code == ROTATE)
4067 res = o0.lrotate (cnt, prec);
4068 else /* code == ROTATERT */
4069 res = o0.rrotate (cnt, prec);
4071 break;
4073 default:
4074 return 0;
4077 return immed_double_int_const (res, mode);
4080 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4081 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4083 /* Get the integer argument values in two forms:
4084 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4086 arg0 = INTVAL (op0);
4087 arg1 = INTVAL (op1);
4089 if (width < HOST_BITS_PER_WIDE_INT)
4091 arg0 &= GET_MODE_MASK (mode);
4092 arg1 &= GET_MODE_MASK (mode);
4094 arg0s = arg0;
4095 if (val_signbit_known_set_p (mode, arg0s))
4096 arg0s |= ~GET_MODE_MASK (mode);
4098 arg1s = arg1;
4099 if (val_signbit_known_set_p (mode, arg1s))
4100 arg1s |= ~GET_MODE_MASK (mode);
4102 else
4104 arg0s = arg0;
4105 arg1s = arg1;
4108 /* Compute the value of the arithmetic. */
4110 switch (code)
4112 case PLUS:
4113 val = arg0s + arg1s;
4114 break;
4116 case MINUS:
4117 val = arg0s - arg1s;
4118 break;
4120 case MULT:
4121 val = arg0s * arg1s;
4122 break;
4124 case DIV:
4125 if (arg1s == 0
4126 || ((unsigned HOST_WIDE_INT) arg0s
4127 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4128 && arg1s == -1))
4129 return 0;
4130 val = arg0s / arg1s;
4131 break;
4133 case MOD:
4134 if (arg1s == 0
4135 || ((unsigned HOST_WIDE_INT) arg0s
4136 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4137 && arg1s == -1))
4138 return 0;
4139 val = arg0s % arg1s;
4140 break;
4142 case UDIV:
4143 if (arg1 == 0
4144 || ((unsigned HOST_WIDE_INT) arg0s
4145 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4146 && arg1s == -1))
4147 return 0;
4148 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4149 break;
4151 case UMOD:
4152 if (arg1 == 0
4153 || ((unsigned HOST_WIDE_INT) arg0s
4154 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4155 && arg1s == -1))
4156 return 0;
4157 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4158 break;
4160 case AND:
4161 val = arg0 & arg1;
4162 break;
4164 case IOR:
4165 val = arg0 | arg1;
4166 break;
4168 case XOR:
4169 val = arg0 ^ arg1;
4170 break;
4172 case LSHIFTRT:
4173 case ASHIFT:
4174 case ASHIFTRT:
4175 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4176 the value is in range. We can't return any old value for
4177 out-of-range arguments because either the middle-end (via
4178 shift_truncation_mask) or the back-end might be relying on
4179 target-specific knowledge. Nor can we rely on
4180 shift_truncation_mask, since the shift might not be part of an
4181 ashlM3, lshrM3 or ashrM3 instruction. */
4182 if (SHIFT_COUNT_TRUNCATED)
4183 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4184 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4185 return 0;
4187 val = (code == ASHIFT
4188 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4189 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4191 /* Sign-extend the result for arithmetic right shifts. */
4192 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4193 val |= HOST_WIDE_INT_M1U << (width - arg1);
4194 break;
4196 case ROTATERT:
4197 if (arg1 < 0)
4198 return 0;
4200 arg1 %= width;
4201 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4202 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4203 break;
4205 case ROTATE:
4206 if (arg1 < 0)
4207 return 0;
4209 arg1 %= width;
4210 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4211 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4212 break;
4214 case COMPARE:
4215 /* Do nothing here. */
4216 return 0;
4218 case SMIN:
4219 val = arg0s <= arg1s ? arg0s : arg1s;
4220 break;
4222 case UMIN:
4223 val = ((unsigned HOST_WIDE_INT) arg0
4224 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4225 break;
4227 case SMAX:
4228 val = arg0s > arg1s ? arg0s : arg1s;
4229 break;
4231 case UMAX:
4232 val = ((unsigned HOST_WIDE_INT) arg0
4233 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4234 break;
4236 case SS_PLUS:
4237 case US_PLUS:
4238 case SS_MINUS:
4239 case US_MINUS:
4240 case SS_MULT:
4241 case US_MULT:
4242 case SS_DIV:
4243 case US_DIV:
4244 case SS_ASHIFT:
4245 case US_ASHIFT:
4246 /* ??? There are simplifications that can be done. */
4247 return 0;
4249 default:
4250 gcc_unreachable ();
4253 return gen_int_mode (val, mode);
4256 return NULL_RTX;
4261 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4262 PLUS or MINUS.
4264 Rather than test for specific cases, we do this by a brute-force method
4265 and do all possible simplifications until no more changes occur. Then
4266 we rebuild the operation. */
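/* For example, (minus (plus x (const_int 4)) x) expands into the operand
   list {x, +4, -x}; the pairwise loop below cancels X against -X and the
   result collapses to (const_int 4). */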
4268 struct simplify_plus_minus_op_data
4270 rtx op;
4271 short neg;
4274 static bool
4275 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4277 int result;
4279 result = (commutative_operand_precedence (y)
4280 - commutative_operand_precedence (x));
4281 if (result)
4282 return result > 0;
4284 /* Group together equal REGs to do more simplification. */
4285 if (REG_P (x) && REG_P (y))
4286 return REGNO (x) > REGNO (y);
4287 else
4288 return false;
4291 static rtx
4292 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4293 rtx op1)
4295 struct simplify_plus_minus_op_data ops[8];
4296 rtx result, tem;
4297 int n_ops = 2, input_ops = 2;
4298 int changed, n_constants = 0, canonicalized = 0;
4299 int i, j;
4301 memset (ops, 0, sizeof ops);
4303 /* Set up the two operands and then expand them until nothing has been
4304 changed. If we run out of room in our array, give up; this should
4305 almost never happen. */
4307 ops[0].op = op0;
4308 ops[0].neg = 0;
4309 ops[1].op = op1;
4310 ops[1].neg = (code == MINUS);
4314 changed = 0;
4316 for (i = 0; i < n_ops; i++)
4318 rtx this_op = ops[i].op;
4319 int this_neg = ops[i].neg;
4320 enum rtx_code this_code = GET_CODE (this_op);
4322 switch (this_code)
4324 case PLUS:
4325 case MINUS:
4326 if (n_ops == 7)
4327 return NULL_RTX;
4329 ops[n_ops].op = XEXP (this_op, 1);
4330 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4331 n_ops++;
4333 ops[i].op = XEXP (this_op, 0);
4334 input_ops++;
4335 changed = 1;
4336 canonicalized |= this_neg;
4337 break;
4339 case NEG:
4340 ops[i].op = XEXP (this_op, 0);
4341 ops[i].neg = ! this_neg;
4342 changed = 1;
4343 canonicalized = 1;
4344 break;
4346 case CONST:
4347 if (n_ops < 7
4348 && GET_CODE (XEXP (this_op, 0)) == PLUS
4349 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4350 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4352 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4353 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4354 ops[n_ops].neg = this_neg;
4355 n_ops++;
4356 changed = 1;
4357 canonicalized = 1;
4359 break;
4361 case NOT:
4362 /* ~a -> (-a - 1) */
4363 if (n_ops != 7)
4365 ops[n_ops].op = CONSTM1_RTX (mode);
4366 ops[n_ops++].neg = this_neg;
4367 ops[i].op = XEXP (this_op, 0);
4368 ops[i].neg = !this_neg;
4369 changed = 1;
4370 canonicalized = 1;
4372 break;
4374 case CONST_INT:
4375 n_constants++;
4376 if (this_neg)
4378 ops[i].op = neg_const_int (mode, this_op);
4379 ops[i].neg = 0;
4380 changed = 1;
4381 canonicalized = 1;
4383 break;
4385 default:
4386 break;
4390 while (changed);
4392 if (n_constants > 1)
4393 canonicalized = 1;
4395 gcc_assert (n_ops >= 2);
4397 /* If we only have two operands, we can avoid the loops. */
4398 if (n_ops == 2)
4400 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4401 rtx lhs, rhs;
4403 /* Get the two operands. Be careful with the order, especially for
4404 the cases where code == MINUS. */
4405 if (ops[0].neg && ops[1].neg)
4407 lhs = gen_rtx_NEG (mode, ops[0].op);
4408 rhs = ops[1].op;
4410 else if (ops[0].neg)
4412 lhs = ops[1].op;
4413 rhs = ops[0].op;
4415 else
4417 lhs = ops[0].op;
4418 rhs = ops[1].op;
4421 return simplify_const_binary_operation (code, mode, lhs, rhs);
4424 /* Now simplify each pair of operands until nothing changes. */
4427 /* Insertion sort is good enough for an eight-element array. */
4428 for (i = 1; i < n_ops; i++)
4430 struct simplify_plus_minus_op_data save;
4431 j = i - 1;
4432 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4433 continue;
4435 canonicalized = 1;
4436 save = ops[i];
4438 ops[j + 1] = ops[j];
4439 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4440 ops[j + 1] = save;
4443 changed = 0;
4444 for (i = n_ops - 1; i > 0; i--)
4445 for (j = i - 1; j >= 0; j--)
4447 rtx lhs = ops[j].op, rhs = ops[i].op;
4448 int lneg = ops[j].neg, rneg = ops[i].neg;
4450 if (lhs != 0 && rhs != 0)
4452 enum rtx_code ncode = PLUS;
4454 if (lneg != rneg)
4456 ncode = MINUS;
4457 if (lneg)
4458 tem = lhs, lhs = rhs, rhs = tem;
4460 else if (swap_commutative_operands_p (lhs, rhs))
4461 tem = lhs, lhs = rhs, rhs = tem;
4463 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4464 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4466 rtx tem_lhs, tem_rhs;
4468 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4469 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4470 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4472 if (tem && !CONSTANT_P (tem))
4473 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4475 else
4476 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4478 /* Reject "simplifications" that just wrap the two
4479 arguments in a CONST. Failure to do so can result
4480 in infinite recursion with simplify_binary_operation
4481 when it calls us to simplify CONST operations. */
4482 if (tem
4483 && ! (GET_CODE (tem) == CONST
4484 && GET_CODE (XEXP (tem, 0)) == ncode
4485 && XEXP (XEXP (tem, 0), 0) == lhs
4486 && XEXP (XEXP (tem, 0), 1) == rhs))
4488 lneg &= rneg;
4489 if (GET_CODE (tem) == NEG)
4490 tem = XEXP (tem, 0), lneg = !lneg;
4491 if (CONST_INT_P (tem) && lneg)
4492 tem = neg_const_int (mode, tem), lneg = 0;
4494 ops[i].op = tem;
4495 ops[i].neg = lneg;
4496 ops[j].op = NULL_RTX;
4497 changed = 1;
4498 canonicalized = 1;
4503 /* If nothing changed, fail. */
4504 if (!canonicalized)
4505 return NULL_RTX;
4507 /* Pack all the operands to the lower-numbered entries. */
4508 for (i = 0, j = 0; j < n_ops; j++)
4509 if (ops[j].op)
4511 ops[i] = ops[j];
4512 i++;
4514 n_ops = i;
4516 while (changed);
4518 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4519 if (n_ops == 2
4520 && CONST_INT_P (ops[1].op)
4521 && CONSTANT_P (ops[0].op)
4522 && ops[0].neg)
4523 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4525 /* We suppressed creation of trivial CONST expressions in the
4526 combination loop to avoid recursion. Create one manually now.
4527 The combination loop should have ensured that there is exactly
4528 one CONST_INT, and the sort will have ensured that it is last
4529 in the array and that any other constant will be next-to-last. */
4531 if (n_ops > 1
4532 && CONST_INT_P (ops[n_ops - 1].op)
4533 && CONSTANT_P (ops[n_ops - 2].op))
4535 rtx value = ops[n_ops - 1].op;
4536 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4537 value = neg_const_int (mode, value);
4538 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4539 INTVAL (value));
4540 n_ops--;
4543 /* Put a non-negated operand first, if possible. */
4545 for (i = 0; i < n_ops && ops[i].neg; i++)
4546 continue;
4547 if (i == n_ops)
4548 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4549 else if (i != 0)
4551 tem = ops[0].op;
4552 ops[0] = ops[i];
4553 ops[i].op = tem;
4554 ops[i].neg = 1;
4557 /* Now make the result by performing the requested operations. */
4558 result = ops[0].op;
4559 for (i = 1; i < n_ops; i++)
4560 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4561 mode, result, ops[i].op);
4563 return result;
4566 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4567 static bool
4568 plus_minus_operand_p (const_rtx x)
4570 return GET_CODE (x) == PLUS
4571 || GET_CODE (x) == MINUS
4572 || (GET_CODE (x) == CONST
4573 && GET_CODE (XEXP (x, 0)) == PLUS
4574 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4575 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4578 /* Like simplify_binary_operation except used for relational operators.
4579 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4580 not both be VOIDmode as well.
4582 CMP_MODE specifies the mode in which the comparison is done, so it is
4583 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4584 the operands or, if both are VOIDmode, the operands are compared in
4585 "infinite precision". */
4587 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4588 enum machine_mode cmp_mode, rtx op0, rtx op1)
4590 rtx tem, trueop0, trueop1;
4592 if (cmp_mode == VOIDmode)
4593 cmp_mode = GET_MODE (op0);
4594 if (cmp_mode == VOIDmode)
4595 cmp_mode = GET_MODE (op1);
4597 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4598 if (tem)
4600 if (SCALAR_FLOAT_MODE_P (mode))
4602 if (tem == const0_rtx)
4603 return CONST0_RTX (mode);
4604 #ifdef FLOAT_STORE_FLAG_VALUE
4606 REAL_VALUE_TYPE val;
4607 val = FLOAT_STORE_FLAG_VALUE (mode);
4608 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4610 #else
4611 return NULL_RTX;
4612 #endif
4614 if (VECTOR_MODE_P (mode))
4616 if (tem == const0_rtx)
4617 return CONST0_RTX (mode);
4618 #ifdef VECTOR_STORE_FLAG_VALUE
4620 int i, units;
4621 rtvec v;
4623 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4624 if (val == NULL_RTX)
4625 return NULL_RTX;
4626 if (val == const1_rtx)
4627 return CONST1_RTX (mode);
4629 units = GET_MODE_NUNITS (mode);
4630 v = rtvec_alloc (units);
4631 for (i = 0; i < units; i++)
4632 RTVEC_ELT (v, i) = val;
4633 return gen_rtx_raw_CONST_VECTOR (mode, v);
4635 #else
4636 return NULL_RTX;
4637 #endif
4640 return tem;
4643 /* For the following tests, ensure const0_rtx is op1. */
4644 if (swap_commutative_operands_p (op0, op1)
4645 || (op0 == const0_rtx && op1 != const0_rtx))
4646 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4648 /* If op0 is a compare, extract the comparison arguments from it. */
4649 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4650 return simplify_gen_relational (code, mode, VOIDmode,
4651 XEXP (op0, 0), XEXP (op0, 1));
4653 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4654 || CC0_P (op0))
4655 return NULL_RTX;
4657 trueop0 = avoid_constant_pool_reference (op0);
4658 trueop1 = avoid_constant_pool_reference (op1);
4659 return simplify_relational_operation_1 (code, mode, cmp_mode,
4660 trueop0, trueop1);
4663 /* This part of simplify_relational_operation is only used when CMP_MODE
4664 is not in class MODE_CC (i.e. it is a real comparison).
4666 MODE is the mode of the result, while CMP_MODE specifies the mode
4667 in which the comparison is done, so it is the mode of the operands. */
4669 static rtx
4670 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4671 enum machine_mode cmp_mode, rtx op0, rtx op1)
4673 enum rtx_code op0code = GET_CODE (op0);
4675 if (op1 == const0_rtx && COMPARISON_P (op0))
4677 /* If op0 is a comparison, extract the comparison arguments
4678 from it. */
4679 if (code == NE)
4681 if (GET_MODE (op0) == mode)
4682 return simplify_rtx (op0);
4683 else
4684 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4685 XEXP (op0, 0), XEXP (op0, 1));
4687 else if (code == EQ)
4689 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4690 if (new_code != UNKNOWN)
4691 return simplify_gen_relational (new_code, mode, VOIDmode,
4692 XEXP (op0, 0), XEXP (op0, 1));
4696 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4697 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
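/* For example, (ltu (plus x (const_int 4)) (const_int 4)) becomes
   (geu x (const_int -4)). */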
4698 if ((code == LTU || code == GEU)
4699 && GET_CODE (op0) == PLUS
4700 && CONST_INT_P (XEXP (op0, 1))
4701 && (rtx_equal_p (op1, XEXP (op0, 0))
4702 || rtx_equal_p (op1, XEXP (op0, 1)))
4703 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4704 && XEXP (op0, 1) != const0_rtx)
4706 rtx new_cmp
4707 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4708 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4709 cmp_mode, XEXP (op0, 0), new_cmp);
4712 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4713 if ((code == LTU || code == GEU)
4714 && GET_CODE (op0) == PLUS
4715 && rtx_equal_p (op1, XEXP (op0, 1))
4716 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4717 && !rtx_equal_p (op1, XEXP (op0, 0)))
4718 return simplify_gen_relational (code, mode, cmp_mode, op0,
4719 copy_rtx (XEXP (op0, 0)));
4721 if (op1 == const0_rtx)
4723 /* Canonicalize (GTU x 0) as (NE x 0). */
4724 if (code == GTU)
4725 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4726 /* Canonicalize (LEU x 0) as (EQ x 0). */
4727 if (code == LEU)
4728 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4730 else if (op1 == const1_rtx)
4732 switch (code)
4734 case GE:
4735 /* Canonicalize (GE x 1) as (GT x 0). */
4736 return simplify_gen_relational (GT, mode, cmp_mode,
4737 op0, const0_rtx);
4738 case GEU:
4739 /* Canonicalize (GEU x 1) as (NE x 0). */
4740 return simplify_gen_relational (NE, mode, cmp_mode,
4741 op0, const0_rtx);
4742 case LT:
4743 /* Canonicalize (LT x 1) as (LE x 0). */
4744 return simplify_gen_relational (LE, mode, cmp_mode,
4745 op0, const0_rtx);
4746 case LTU:
4747 /* Canonicalize (LTU x 1) as (EQ x 0). */
4748 return simplify_gen_relational (EQ, mode, cmp_mode,
4749 op0, const0_rtx);
4750 default:
4751 break;
4754 else if (op1 == constm1_rtx)
4756 /* Canonicalize (LE x -1) as (LT x 0). */
4757 if (code == LE)
4758 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4759 /* Canonicalize (GT x -1) as (GE x 0). */
4760 if (code == GT)
4761 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4764 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
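/* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)). */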
4765 if ((code == EQ || code == NE)
4766 && (op0code == PLUS || op0code == MINUS)
4767 && CONSTANT_P (op1)
4768 && CONSTANT_P (XEXP (op0, 1))
4769 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4771 rtx x = XEXP (op0, 0);
4772 rtx c = XEXP (op0, 1);
4773 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4774 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4776 /* Detect an infinite recursive condition, where we oscillate at this
4777 simplification case between:
4778 A + B == C <---> C - B == A,
4779 where A, B, and C are all constants with non-simplifiable expressions,
4780 usually SYMBOL_REFs. */
4781 if (GET_CODE (tem) == invcode
4782 && CONSTANT_P (x)
4783 && rtx_equal_p (c, XEXP (tem, 1)))
4784 return NULL_RTX;
4786 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4789 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4790 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4791 if (code == NE
4792 && op1 == const0_rtx
4793 && GET_MODE_CLASS (mode) == MODE_INT
4794 && cmp_mode != VOIDmode
4795 /* ??? Work-around BImode bugs in the ia64 backend. */
4796 && mode != BImode
4797 && cmp_mode != BImode
4798 && nonzero_bits (op0, cmp_mode) == 1
4799 && STORE_FLAG_VALUE == 1)
4800 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4801 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4802 : lowpart_subreg (mode, op0, cmp_mode);
4804 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4805 if ((code == EQ || code == NE)
4806 && op1 == const0_rtx
4807 && op0code == XOR)
4808 return simplify_gen_relational (code, mode, cmp_mode,
4809 XEXP (op0, 0), XEXP (op0, 1));
4811 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4812 if ((code == EQ || code == NE)
4813 && op0code == XOR
4814 && rtx_equal_p (XEXP (op0, 0), op1)
4815 && !side_effects_p (XEXP (op0, 0)))
4816 return simplify_gen_relational (code, mode, cmp_mode,
4817 XEXP (op0, 1), const0_rtx);
4819 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4820 if ((code == EQ || code == NE)
4821 && op0code == XOR
4822 && rtx_equal_p (XEXP (op0, 1), op1)
4823 && !side_effects_p (XEXP (op0, 1)))
4824 return simplify_gen_relational (code, mode, cmp_mode,
4825 XEXP (op0, 0), const0_rtx);
4827 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4828 if ((code == EQ || code == NE)
4829 && op0code == XOR
4830 && CONST_SCALAR_INT_P (op1)
4831 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4832 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4833 simplify_gen_binary (XOR, cmp_mode,
4834 XEXP (op0, 1), op1));
4836 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4837 if ((code == EQ || code == NE)
4838 && GET_CODE (op0) == BSWAP
4839 && CONST_SCALAR_INT_P (op1))
4840 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4841 simplify_gen_unary (BSWAP, cmp_mode,
4842 op1, cmp_mode));
4844 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4845 if ((code == EQ || code == NE)
4846 && GET_CODE (op0) == BSWAP
4847 && GET_CODE (op1) == BSWAP)
4848 return simplify_gen_relational (code, mode, cmp_mode,
4849 XEXP (op0, 0), XEXP (op1, 0));
4851 if (op0code == POPCOUNT && op1 == const0_rtx)
4852 switch (code)
4854 case EQ:
4855 case LE:
4856 case LEU:
4857 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4858 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4859 XEXP (op0, 0), const0_rtx);
4861 case NE:
4862 case GT:
4863 case GTU:
4864 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4865 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4866 XEXP (op0, 0), const0_rtx);
4868 default:
4869 break;
4872 return NULL_RTX;
4875 enum
4877 CMP_EQ = 1,
4878 CMP_LT = 2,
4879 CMP_GT = 4,
4880 CMP_LTU = 8,
4881 CMP_GTU = 16
4885 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4886 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4887 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4888 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4889 For floating-point comparisons, assume that the operands were ordered. */
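/* For example, KNOWN_RESULT == CMP_EQ means the operands compare equal, so
   LE and GE fold to const_true_rtx while LT, GT and NE fold to const0_rtx. */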
4891 static rtx
4892 comparison_result (enum rtx_code code, int known_results)
4894 switch (code)
4896 case EQ:
4897 case UNEQ:
4898 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4899 case NE:
4900 case LTGT:
4901 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4903 case LT:
4904 case UNLT:
4905 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4906 case GE:
4907 case UNGE:
4908 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4910 case GT:
4911 case UNGT:
4912 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4913 case LE:
4914 case UNLE:
4915 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4917 case LTU:
4918 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4919 case GEU:
4920 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4922 case GTU:
4923 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4924 case LEU:
4925 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4927 case ORDERED:
4928 return const_true_rtx;
4929 case UNORDERED:
4930 return const0_rtx;
4931 default:
4932 gcc_unreachable ();
4936 /* Check if the given comparison (done in the given MODE) is actually a
4937 tautology or a contradiction.
4938 If no simplification is possible, this function returns zero.
4939 Otherwise, it returns either const_true_rtx or const0_rtx. */
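/* For example, (ltu (const_int 3) (const_int 4)) is a tautology and folds to
   const_true_rtx, and (eq x (const_int C)) folds to const0_rtx whenever C is
   provably outside the range that X can take. */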
4942 simplify_const_relational_operation (enum rtx_code code,
4943 enum machine_mode mode,
4944 rtx op0, rtx op1)
4946 rtx tem;
4947 rtx trueop0;
4948 rtx trueop1;
4950 gcc_assert (mode != VOIDmode
4951 || (GET_MODE (op0) == VOIDmode
4952 && GET_MODE (op1) == VOIDmode));
4954 /* If op0 is a compare, extract the comparison arguments from it. */
4955 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4957 op1 = XEXP (op0, 1);
4958 op0 = XEXP (op0, 0);
4960 if (GET_MODE (op0) != VOIDmode)
4961 mode = GET_MODE (op0);
4962 else if (GET_MODE (op1) != VOIDmode)
4963 mode = GET_MODE (op1);
4964 else
4965 return 0;
4968 /* We can't simplify MODE_CC values since we don't know what the
4969 actual comparison is. */
4970 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4971 return 0;
4973 /* Make sure the constant is second. */
4974 if (swap_commutative_operands_p (op0, op1))
4976 tem = op0, op0 = op1, op1 = tem;
4977 code = swap_condition (code);
4980 trueop0 = avoid_constant_pool_reference (op0);
4981 trueop1 = avoid_constant_pool_reference (op1);
4983 /* For integer comparisons of A and B maybe we can simplify A - B and can
4984 then simplify a comparison of that with zero. If A and B are both either
4985 a register or a CONST_INT, this can't help; testing for these cases will
4986 prevent infinite recursion here and speed things up.
4988 We can only do this for EQ and NE comparisons as otherwise we may
4989 lose or introduce overflow which we cannot disregard as undefined, since
4990 we do not know the signedness of the operation on either the left or
4991 the right hand side of the comparison. */
4993 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4994 && (code == EQ || code == NE)
4995 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4996 && (REG_P (op1) || CONST_INT_P (trueop1)))
4997 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4998 /* We cannot do this if tem is a nonzero address. */
4999 && ! nonzero_address_p (tem))
5000 return simplify_const_relational_operation (signed_condition (code),
5001 mode, tem, const0_rtx);
5003 if (! HONOR_NANS (mode) && code == ORDERED)
5004 return const_true_rtx;
5006 if (! HONOR_NANS (mode) && code == UNORDERED)
5007 return const0_rtx;
5009 /* For modes without NaNs, if the two operands are equal, we know the
5010 result except if they have side-effects. Even with NaNs we know
5011 the result of unordered comparisons and, if signaling NaNs are
5012 irrelevant, also the result of LT/GT/LTGT. */
5013 if ((! HONOR_NANS (GET_MODE (trueop0))
5014 || code == UNEQ || code == UNLE || code == UNGE
5015 || ((code == LT || code == GT || code == LTGT)
5016 && ! HONOR_SNANS (GET_MODE (trueop0))))
5017 && rtx_equal_p (trueop0, trueop1)
5018 && ! side_effects_p (trueop0))
5019 return comparison_result (code, CMP_EQ);
5021 /* If the operands are floating-point constants, see if we can fold
5022 the result. */
5023 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5024 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5025 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5027 REAL_VALUE_TYPE d0, d1;
5029 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5030 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5032 /* Comparisons are unordered iff at least one of the values is NaN. */
5033 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5034 switch (code)
5036 case UNEQ:
5037 case UNLT:
5038 case UNGT:
5039 case UNLE:
5040 case UNGE:
5041 case NE:
5042 case UNORDERED:
5043 return const_true_rtx;
5044 case EQ:
5045 case LT:
5046 case GT:
5047 case LE:
5048 case GE:
5049 case LTGT:
5050 case ORDERED:
5051 return const0_rtx;
5052 default:
5053 return 0;
5056 return comparison_result (code,
5057 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5058 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5061 /* Otherwise, see if the operands are both integers. */
5062 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5063 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5064 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5066 int width = GET_MODE_PRECISION (mode);
5067 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5068 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5070 /* Get the two words comprising each integer constant. */
5071 if (CONST_DOUBLE_AS_INT_P (trueop0))
5073 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5074 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5076 else
5078 l0u = l0s = INTVAL (trueop0);
5079 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5082 if (CONST_DOUBLE_AS_INT_P (trueop1))
5084 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5085 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5087 else
5089 l1u = l1s = INTVAL (trueop1);
5090 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5093 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5094 we have to sign or zero-extend the values. */
5095 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5097 l0u &= GET_MODE_MASK (mode);
5098 l1u &= GET_MODE_MASK (mode);
5100 if (val_signbit_known_set_p (mode, l0s))
5101 l0s |= ~GET_MODE_MASK (mode);
5103 if (val_signbit_known_set_p (mode, l1s))
5104 l1s |= ~GET_MODE_MASK (mode);
5106 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5107 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5109 if (h0u == h1u && l0u == l1u)
5110 return comparison_result (code, CMP_EQ);
5111 else
5113 int cr;
5114 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5115 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5116 return comparison_result (code, cr);
5120 /* Optimize comparisons with upper and lower bounds. */
5121 if (HWI_COMPUTABLE_MODE_P (mode)
5122 && CONST_INT_P (trueop1))
5124 int sign;
5125 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5126 HOST_WIDE_INT val = INTVAL (trueop1);
5127 HOST_WIDE_INT mmin, mmax;
5129 if (code == GEU
5130 || code == LEU
5131 || code == GTU
5132 || code == LTU)
5133 sign = 0;
5134 else
5135 sign = 1;
5137 /* Get a reduced range if the sign bit is zero. */
5138 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5140 mmin = 0;
5141 mmax = nonzero;
5143 else
5145 rtx mmin_rtx, mmax_rtx;
5146 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5148 mmin = INTVAL (mmin_rtx);
5149 mmax = INTVAL (mmax_rtx);
5150 if (sign)
5152 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5154 mmin >>= (sign_copies - 1);
5155 mmax >>= (sign_copies - 1);
5159 switch (code)
5161 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5162 case GEU:
5163 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5164 return const_true_rtx;
5165 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5166 return const0_rtx;
5167 break;
5168 case GE:
5169 if (val <= mmin)
5170 return const_true_rtx;
5171 if (val > mmax)
5172 return const0_rtx;
5173 break;
5175 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5176 case LEU:
5177 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5178 return const_true_rtx;
5179 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5180 return const0_rtx;
5181 break;
5182 case LE:
5183 if (val >= mmax)
5184 return const_true_rtx;
5185 if (val < mmin)
5186 return const0_rtx;
5187 break;
5189 case EQ:
5190 /* x == y is always false for y out of range. */
5191 if (val < mmin || val > mmax)
5192 return const0_rtx;
5193 break;
5195 /* x > y is always false for y >= mmax, always true for y < mmin. */
5196 case GTU:
5197 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5198 return const0_rtx;
5199 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5200 return const_true_rtx;
5201 break;
5202 case GT:
5203 if (val >= mmax)
5204 return const0_rtx;
5205 if (val < mmin)
5206 return const_true_rtx;
5207 break;
5209 /* x < y is always false for y <= mmin, always true for y > mmax. */
5210 case LTU:
5211 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5212 return const0_rtx;
5213 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5214 return const_true_rtx;
5215 break;
5216 case LT:
5217 if (val <= mmin)
5218 return const0_rtx;
5219 if (val > mmax)
5220 return const_true_rtx;
5221 break;
5223 case NE:
5224 /* x != y is always true for y out of range. */
5225 if (val < mmin || val > mmax)
5226 return const_true_rtx;
5227 break;
5229 default:
5230 break;
5234 /* Optimize integer comparisons with zero. */
5235 if (trueop1 == const0_rtx)
5237 /* Some addresses are known to be nonzero. We don't know
5238 their sign, but equality comparisons are known. */
5239 if (nonzero_address_p (trueop0))
5241 if (code == EQ || code == LEU)
5242 return const0_rtx;
5243 if (code == NE || code == GTU)
5244 return const_true_rtx;
5247 /* See if the first operand is an IOR with a constant. If so, we
5248 may be able to determine the result of this comparison. */
5249 if (GET_CODE (op0) == IOR)
5251 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5252 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5254 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5255 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5256 && (UINTVAL (inner_const)
5257 & ((unsigned HOST_WIDE_INT) 1
5258 << sign_bitnum)));
5260 switch (code)
5262 case EQ:
5263 case LEU:
5264 return const0_rtx;
5265 case NE:
5266 case GTU:
5267 return const_true_rtx;
5268 case LT:
5269 case LE:
5270 if (has_sign)
5271 return const_true_rtx;
5272 break;
5273 case GT:
5274 case GE:
5275 if (has_sign)
5276 return const0_rtx;
5277 break;
5278 default:
5279 break;
5285 /* Optimize comparison of ABS with zero. */
5286 if (trueop1 == CONST0_RTX (mode)
5287 && (GET_CODE (trueop0) == ABS
5288 || (GET_CODE (trueop0) == FLOAT_EXTEND
5289 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5291 switch (code)
5293 case LT:
5294 /* Optimize abs(x) < 0.0. */
5295 if (!HONOR_SNANS (mode)
5296 && (!INTEGRAL_MODE_P (mode)
5297 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5299 if (INTEGRAL_MODE_P (mode)
5300 && (issue_strict_overflow_warning
5301 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5302 warning (OPT_Wstrict_overflow,
5303 ("assuming signed overflow does not occur when "
5304 "assuming abs (x) < 0 is false"));
5305 return const0_rtx;
5307 break;
5309 case GE:
5310 /* Optimize abs(x) >= 0.0. */
5311 if (!HONOR_NANS (mode)
5312 && (!INTEGRAL_MODE_P (mode)
5313 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5315 if (INTEGRAL_MODE_P (mode)
5316 && (issue_strict_overflow_warning
5317 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5318 warning (OPT_Wstrict_overflow,
5319 ("assuming signed overflow does not occur when "
5320 "assuming abs (x) >= 0 is true"));
5321 return const_true_rtx;
5323 break;
5325 case UNGE:
5326 /* Optimize ! (abs(x) < 0.0). */
5327 return const_true_rtx;
5329 default:
5330 break;
5334 return 0;
5337 /* Simplify CODE, an operation with result mode MODE and three operands,
5338 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5339 a constant. Return 0 if no simplification is possible. */
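/* For example, (if_then_else (const_int 1) a b) simplifies to A and
   (if_then_else (const_int 0) a b) simplifies to B. */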
5342 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5343 enum machine_mode op0_mode, rtx op0, rtx op1,
5344 rtx op2)
5346 unsigned int width = GET_MODE_PRECISION (mode);
5347 bool any_change = false;
5348 rtx tem, trueop2;
5350 /* VOIDmode means "infinite" precision. */
5351 if (width == 0)
5352 width = HOST_BITS_PER_WIDE_INT;
5354 switch (code)
5356 case FMA:
5357 /* Simplify negations around the multiplication. */
5358 /* -a * -b + c => a * b + c. */
5359 if (GET_CODE (op0) == NEG)
5361 tem = simplify_unary_operation (NEG, mode, op1, mode);
5362 if (tem)
5363 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5365 else if (GET_CODE (op1) == NEG)
5367 tem = simplify_unary_operation (NEG, mode, op0, mode);
5368 if (tem)
5369 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5372 /* Canonicalize the two multiplication operands. */
5373 /* a * -b + c => -b * a + c. */
5374 if (swap_commutative_operands_p (op0, op1))
5375 tem = op0, op0 = op1, op1 = tem, any_change = true;
5377 if (any_change)
5378 return gen_rtx_FMA (mode, op0, op1, op2);
5379 return NULL_RTX;
5381 case SIGN_EXTRACT:
5382 case ZERO_EXTRACT:
5383 if (CONST_INT_P (op0)
5384 && CONST_INT_P (op1)
5385 && CONST_INT_P (op2)
5386 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5387 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5389 /* Extracting a bit-field from a constant. */
5390 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5391 HOST_WIDE_INT op1val = INTVAL (op1);
5392 HOST_WIDE_INT op2val = INTVAL (op2);
5393 if (BITS_BIG_ENDIAN)
5394 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5395 else
5396 val >>= op2val;
5398 if (HOST_BITS_PER_WIDE_INT != op1val)
5400 /* First zero-extend. */
5401 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5402 /* If desired, propagate sign bit. */
5403 if (code == SIGN_EXTRACT
5404 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5405 != 0)
5406 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5409 return gen_int_mode (val, mode);
5411 break;
5413 case IF_THEN_ELSE:
5414 if (CONST_INT_P (op0))
5415 return op0 != const0_rtx ? op1 : op2;
5417 /* Convert c ? a : a into "a". */
5418 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5419 return op1;
5421 /* Convert a != b ? a : b into "a". */
5422 if (GET_CODE (op0) == NE
5423 && ! side_effects_p (op0)
5424 && ! HONOR_NANS (mode)
5425 && ! HONOR_SIGNED_ZEROS (mode)
5426 && ((rtx_equal_p (XEXP (op0, 0), op1)
5427 && rtx_equal_p (XEXP (op0, 1), op2))
5428 || (rtx_equal_p (XEXP (op0, 0), op2)
5429 && rtx_equal_p (XEXP (op0, 1), op1))))
5430 return op1;
5432 /* Convert a == b ? a : b into "b". */
5433 if (GET_CODE (op0) == EQ
5434 && ! side_effects_p (op0)
5435 && ! HONOR_NANS (mode)
5436 && ! HONOR_SIGNED_ZEROS (mode)
5437 && ((rtx_equal_p (XEXP (op0, 0), op1)
5438 && rtx_equal_p (XEXP (op0, 1), op2))
5439 || (rtx_equal_p (XEXP (op0, 0), op2)
5440 && rtx_equal_p (XEXP (op0, 1), op1))))
5441 return op2;
5443 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5445 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5446 ? GET_MODE (XEXP (op0, 1))
5447 : GET_MODE (XEXP (op0, 0)));
5448 rtx temp;
5450 /* Look for happy constants in op1 and op2. */
5451 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5453 HOST_WIDE_INT t = INTVAL (op1);
5454 HOST_WIDE_INT f = INTVAL (op2);
5456 if (t == STORE_FLAG_VALUE && f == 0)
5457 code = GET_CODE (op0);
5458 else if (t == 0 && f == STORE_FLAG_VALUE)
5460 enum rtx_code tmp;
5461 tmp = reversed_comparison_code (op0, NULL_RTX);
5462 if (tmp == UNKNOWN)
5463 break;
5464 code = tmp;
5466 else
5467 break;
5469 return simplify_gen_relational (code, mode, cmp_mode,
5470 XEXP (op0, 0), XEXP (op0, 1));
5473 if (cmp_mode == VOIDmode)
5474 cmp_mode = op0_mode;
5475 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5476 cmp_mode, XEXP (op0, 0),
5477 XEXP (op0, 1));
5479 /* See if any simplifications were possible. */
5480 if (temp)
5482 if (CONST_INT_P (temp))
5483 return temp == const0_rtx ? op2 : op1;
5484 else if (temp)
5485 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5488 break;
5490 case VEC_MERGE:
5491 gcc_assert (GET_MODE (op0) == mode);
5492 gcc_assert (GET_MODE (op1) == mode);
5493 gcc_assert (VECTOR_MODE_P (mode));
5494 trueop2 = avoid_constant_pool_reference (op2);
5495 if (CONST_INT_P (trueop2))
5497 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5498 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5499 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5500 unsigned HOST_WIDE_INT mask;
5501 if (n_elts == HOST_BITS_PER_WIDE_INT)
5502 mask = -1;
5503 else
5504 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5506 if (!(sel & mask) && !side_effects_p (op0))
5507 return op1;
5508 if ((sel & mask) == mask && !side_effects_p (op1))
5509 return op0;
5511 rtx trueop0 = avoid_constant_pool_reference (op0);
5512 rtx trueop1 = avoid_constant_pool_reference (op1);
5513 if (GET_CODE (trueop0) == CONST_VECTOR
5514 && GET_CODE (trueop1) == CONST_VECTOR)
5516 rtvec v = rtvec_alloc (n_elts);
5517 unsigned int i;
5519 for (i = 0; i < n_elts; i++)
5520 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5521 ? CONST_VECTOR_ELT (trueop0, i)
5522 : CONST_VECTOR_ELT (trueop1, i));
5523 return gen_rtx_CONST_VECTOR (mode, v);
5526 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5527 if no element from a appears in the result. */
5528 if (GET_CODE (op0) == VEC_MERGE)
5530 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5531 if (CONST_INT_P (tem))
5533 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5534 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5535 return simplify_gen_ternary (code, mode, mode,
5536 XEXP (op0, 1), op1, op2);
5537 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5538 return simplify_gen_ternary (code, mode, mode,
5539 XEXP (op0, 0), op1, op2);
5542 if (GET_CODE (op1) == VEC_MERGE)
5544 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5545 if (CONST_INT_P (tem))
5547 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5548 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5549 return simplify_gen_ternary (code, mode, mode,
5550 op0, XEXP (op1, 1), op2);
5551 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5552 return simplify_gen_ternary (code, mode, mode,
5553 op0, XEXP (op1, 0), op2);
5558 if (rtx_equal_p (op0, op1)
5559 && !side_effects_p (op2) && !side_effects_p (op1))
5560 return op0;
5562 break;
5564 default:
5565 gcc_unreachable ();
5568 return 0;
5571 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5572 or CONST_VECTOR,
5573 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5575 Works by unpacking OP into a collection of 8-bit values
5576 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5577 and then repacking them again for OUTERMODE. */
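/* For example, on a little-endian target, taking a QImode subreg at byte 0
   of the HImode constant 0x1234 unpacks the bytes as {0x34, 0x12} and
   repacks them into (const_int 0x34); byte 1 would give (const_int 0x12). */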
5579 static rtx
5580 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5581 enum machine_mode innermode, unsigned int byte)
5583 /* We support up to 512-bit values (for V8DFmode). */
5584 enum {
5585 max_bitsize = 512,
5586 value_bit = 8,
5587 value_mask = (1 << value_bit) - 1
5589 unsigned char value[max_bitsize / value_bit];
5590 int value_start;
5591 int i;
5592 int elem;
5594 int num_elem;
5595 rtx * elems;
5596 int elem_bitsize;
5597 rtx result_s;
5598 rtvec result_v = NULL;
5599 enum mode_class outer_class;
5600 enum machine_mode outer_submode;
5602 /* Some ports misuse CCmode. */
5603 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5604 return op;
5606 /* We have no way to represent a complex constant at the rtl level. */
5607 if (COMPLEX_MODE_P (outermode))
5608 return NULL_RTX;
5610 /* Unpack the value. */
5612 if (GET_CODE (op) == CONST_VECTOR)
5614 num_elem = CONST_VECTOR_NUNITS (op);
5615 elems = &CONST_VECTOR_ELT (op, 0);
5616 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5618 else
5620 num_elem = 1;
5621 elems = &op;
5622 elem_bitsize = max_bitsize;
5624 /* If this asserts, it is too complicated; reducing value_bit may help. */
5625 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5626 /* I don't know how to handle endianness of sub-units. */
5627 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5629 for (elem = 0; elem < num_elem; elem++)
5631 unsigned char * vp;
5632 rtx el = elems[elem];
5634 /* Vectors are kept in target memory order. (This is probably
5635 a mistake.) */
5637 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5638 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5639 / BITS_PER_UNIT);
5640 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5641 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5642 unsigned bytele = (subword_byte % UNITS_PER_WORD
5643 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5644 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5647 switch (GET_CODE (el))
5649 case CONST_INT:
5650 for (i = 0;
5651 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5652 i += value_bit)
5653 *vp++ = INTVAL (el) >> i;
5654 /* CONST_INTs are always logically sign-extended. */
5655 for (; i < elem_bitsize; i += value_bit)
5656 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5657 break;
5659 case CONST_DOUBLE:
5660 if (GET_MODE (el) == VOIDmode)
5662 unsigned char extend = 0;
5663 /* If this triggers, someone should have generated a
5664 CONST_INT instead. */
5665 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5667 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5668 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5669 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5671 *vp++
5672 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5673 i += value_bit;
5676 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5677 extend = -1;
5678 for (; i < elem_bitsize; i += value_bit)
5679 *vp++ = extend;
5681 else
5683 long tmp[max_bitsize / 32];
5684 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5686 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5687 gcc_assert (bitsize <= elem_bitsize);
5688 gcc_assert (bitsize % value_bit == 0);
5690 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5691 GET_MODE (el));
5693 /* real_to_target produces its result in words affected by
5694 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5695 and use WORDS_BIG_ENDIAN instead; see the documentation
5696 of SUBREG in rtl.texi. */
5697 for (i = 0; i < bitsize; i += value_bit)
5699 int ibase;
5700 if (WORDS_BIG_ENDIAN)
5701 ibase = bitsize - 1 - i;
5702 else
5703 ibase = i;
5704 *vp++ = tmp[ibase / 32] >> i % 32;
5707 /* It shouldn't matter what's done here, so fill it with
5708 zero. */
5709 for (; i < elem_bitsize; i += value_bit)
5710 *vp++ = 0;
5712 break;
5714 case CONST_FIXED:
5715 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5717 for (i = 0; i < elem_bitsize; i += value_bit)
5718 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5720 else
5722 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5723 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5724 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5725 i += value_bit)
5726 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5727 >> (i - HOST_BITS_PER_WIDE_INT);
5728 for (; i < elem_bitsize; i += value_bit)
5729 *vp++ = 0;
5731 break;
5733 default:
5734 gcc_unreachable ();
5738 /* Now, pick the right byte to start with. */
5739 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5740 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5741 will already have offset 0. */
5742 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5744 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5745 - byte);
5746 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5747 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5748 byte = (subword_byte % UNITS_PER_WORD
5749 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5752 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5753 so if it's become negative it will instead be very large.) */
5754 gcc_assert (byte < GET_MODE_SIZE (innermode));
5756 /* Convert from bytes to chunks of size value_bit. */
5757 value_start = byte * (BITS_PER_UNIT / value_bit);
5759 /* Re-pack the value. */
5761 if (VECTOR_MODE_P (outermode))
5763 num_elem = GET_MODE_NUNITS (outermode);
5764 result_v = rtvec_alloc (num_elem);
5765 elems = &RTVEC_ELT (result_v, 0);
5766 outer_submode = GET_MODE_INNER (outermode);
5768 else
5770 num_elem = 1;
5771 elems = &result_s;
5772 outer_submode = outermode;
5775 outer_class = GET_MODE_CLASS (outer_submode);
5776 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5778 gcc_assert (elem_bitsize % value_bit == 0);
5779 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5781 for (elem = 0; elem < num_elem; elem++)
5783 unsigned char *vp;
5785 /* Vectors are stored in target memory order. (This is probably
5786 a mistake.) */
5788 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5789 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5790 / BITS_PER_UNIT);
5791 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5792 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5793 unsigned bytele = (subword_byte % UNITS_PER_WORD
5794 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5795 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5798 switch (outer_class)
5800 case MODE_INT:
5801 case MODE_PARTIAL_INT:
5803 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5805 for (i = 0;
5806 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5807 i += value_bit)
5808 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5809 for (; i < elem_bitsize; i += value_bit)
5810 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5811 << (i - HOST_BITS_PER_WIDE_INT);
5813 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5814 know why. */
5815 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5816 elems[elem] = gen_int_mode (lo, outer_submode);
5817 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5818 elems[elem] = immed_double_const (lo, hi, outer_submode);
5819 else
5820 return NULL_RTX;
5822 break;
5824 case MODE_FLOAT:
5825 case MODE_DECIMAL_FLOAT:
5827 REAL_VALUE_TYPE r;
5828 long tmp[max_bitsize / 32];
5830 /* real_from_target wants its input in words affected by
5831 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5832 and use WORDS_BIG_ENDIAN instead; see the documentation
5833 of SUBREG in rtl.texi. */
5834 for (i = 0; i < max_bitsize / 32; i++)
5835 tmp[i] = 0;
5836 for (i = 0; i < elem_bitsize; i += value_bit)
5838 int ibase;
5839 if (WORDS_BIG_ENDIAN)
5840 ibase = elem_bitsize - 1 - i;
5841 else
5842 ibase = i;
5843 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5846 real_from_target (&r, tmp, outer_submode);
5847 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5849 break;
5851 case MODE_FRACT:
5852 case MODE_UFRACT:
5853 case MODE_ACCUM:
5854 case MODE_UACCUM:
5856 FIXED_VALUE_TYPE f;
5857 f.data.low = 0;
5858 f.data.high = 0;
5859 f.mode = outer_submode;
5861 for (i = 0;
5862 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5863 i += value_bit)
5864 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5865 for (; i < elem_bitsize; i += value_bit)
5866 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5867 << (i - HOST_BITS_PER_WIDE_INT));
5869 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5871 break;
5873 default:
5874 gcc_unreachable ();
5877 if (VECTOR_MODE_P (outermode))
5878 return gen_rtx_CONST_VECTOR (outermode, result_v);
5879 else
5880 return result_s;
5883 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5884 Return 0 if no simplifications are possible. */
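/* For example, on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0)
   returns (const_int 0x34).  */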
5885 rtx
5886 simplify_subreg (enum machine_mode outermode, rtx op,
5887 enum machine_mode innermode, unsigned int byte)
5889 /* Little bit of sanity checking. */
5890 gcc_assert (innermode != VOIDmode);
5891 gcc_assert (outermode != VOIDmode);
5892 gcc_assert (innermode != BLKmode);
5893 gcc_assert (outermode != BLKmode);
5895 gcc_assert (GET_MODE (op) == innermode
5896 || GET_MODE (op) == VOIDmode);
5898 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5899 return NULL_RTX;
5901 if (byte >= GET_MODE_SIZE (innermode))
5902 return NULL_RTX;
5904 if (outermode == innermode && !byte)
5905 return op;
5907 if (CONST_SCALAR_INT_P (op)
5908 || CONST_DOUBLE_AS_FLOAT_P (op)
5909 || GET_CODE (op) == CONST_FIXED
5910 || GET_CODE (op) == CONST_VECTOR)
5911 return simplify_immed_subreg (outermode, op, innermode, byte);
5913 /* Changing mode twice with SUBREG => just change it once,
5914 or not at all if changing back to OP's starting mode. */
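/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0), and (subreg:SI (subreg:DI (reg:SI R) 0) 0)
   collapses back to (reg:SI R).  */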
5915 if (GET_CODE (op) == SUBREG)
5917 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5918 int final_offset = byte + SUBREG_BYTE (op);
5919 rtx newx;
5921 if (outermode == innermostmode
5922 && byte == 0 && SUBREG_BYTE (op) == 0)
5923 return SUBREG_REG (op);
5925 /* The SUBREG_BYTE represents the offset, as if the value were stored
5926 in memory. The irritating exception is a paradoxical subreg, where
5927 we define SUBREG_BYTE to be 0. On big-endian machines this value
5928 would conceptually be negative. For the moment, undo this exception. */
5929 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5931 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5932 if (WORDS_BIG_ENDIAN)
5933 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5934 if (BYTES_BIG_ENDIAN)
5935 final_offset += difference % UNITS_PER_WORD;
5937 if (SUBREG_BYTE (op) == 0
5938 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5940 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5941 if (WORDS_BIG_ENDIAN)
5942 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5943 if (BYTES_BIG_ENDIAN)
5944 final_offset += difference % UNITS_PER_WORD;
5947 /* See whether the resulting subreg will be paradoxical. */
5948 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5950 /* In nonparadoxical subregs we can't handle negative offsets. */
5951 if (final_offset < 0)
5952 return NULL_RTX;
5953 /* Bail out in case the resulting subreg would be incorrect. */
5954 if (final_offset % GET_MODE_SIZE (outermode)
5955 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5956 return NULL_RTX;
5958 else
5960 int offset = 0;
5961 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5963 /* For a paradoxical subreg, check whether we are still looking at the
5964 lower part. If so, our SUBREG_BYTE will be 0. */
5965 if (WORDS_BIG_ENDIAN)
5966 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5967 if (BYTES_BIG_ENDIAN)
5968 offset += difference % UNITS_PER_WORD;
5969 if (offset == final_offset)
5970 final_offset = 0;
5971 else
5972 return NULL_RTX;
5975 /* Recurse for further possible simplifications. */
5976 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5977 final_offset);
5978 if (newx)
5979 return newx;
5980 if (validate_subreg (outermode, innermostmode,
5981 SUBREG_REG (op), final_offset))
5983 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5984 if (SUBREG_PROMOTED_VAR_P (op)
5985 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5986 && GET_MODE_CLASS (outermode) == MODE_INT
5987 && IN_RANGE (GET_MODE_SIZE (outermode),
5988 GET_MODE_SIZE (innermode),
5989 GET_MODE_SIZE (innermostmode))
5990 && subreg_lowpart_p (newx))
5992 SUBREG_PROMOTED_VAR_P (newx) = 1;
5993 SUBREG_PROMOTED_UNSIGNED_SET
5994 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5996 return newx;
5998 return NULL_RTX;
6001 /* SUBREG of a hard register => just change the register number
6002 and/or mode. If the hard register is not valid in that mode,
6003 suppress this simplification. If the hard register is the stack,
6004 frame, or argument pointer, leave this as a SUBREG. */
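/* E.g. on a little-endian target where DImode occupies two word-sized
   hard registers N and N+1, (subreg:SI (reg:DI N) 4) can become
   (reg:SI N+1), provided SImode is valid in that register.  */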
6006 if (REG_P (op) && HARD_REGISTER_P (op))
6008 unsigned int regno, final_regno;
6010 regno = REGNO (op);
6011 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6012 if (HARD_REGISTER_NUM_P (final_regno))
6014 rtx x;
6015 int final_offset = byte;
6017 /* Adjust offset for paradoxical subregs. */
6018 if (byte == 0
6019 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6021 int difference = (GET_MODE_SIZE (innermode)
6022 - GET_MODE_SIZE (outermode));
6023 if (WORDS_BIG_ENDIAN)
6024 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6025 if (BYTES_BIG_ENDIAN)
6026 final_offset += difference % UNITS_PER_WORD;
6029 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6031 /* Propagate the original regno. We don't have any way to specify
6032 an offset inside the original regno, so do so only for the lowpart.
6033 The information is used only by alias analysis, which cannot
6034 grok partial registers anyway. */
6036 if (subreg_lowpart_offset (outermode, innermode) == byte)
6037 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6038 return x;
6042 /* If we have a SUBREG of a register that we are replacing and we are
6043 replacing it with a MEM, make a new MEM and try replacing the
6044 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6045 or if we would be widening it. */
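/* For instance, (subreg:QI (mem:SI A) 0) can be rewritten as
   (mem:QI A) when the conditions below hold; SUBREG_BYTE counts from
   the lowest address just like a memory offset, so adjust_address_nv
   needs no endianness correction.  */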
6047 if (MEM_P (op)
6048 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6049 /* Allow splitting of volatile memory references in case we don't
6050 have an instruction to move the whole thing. */
6051 && (! MEM_VOLATILE_P (op)
6052 || ! have_insn_for (SET, innermode))
6053 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6054 return adjust_address_nv (op, outermode, byte);
6056 /* Handle complex values represented as CONCAT
6057 of real and imaginary part. */
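/* E.g. for (concat:SC X Y) with 4-byte SFmode parts, byte 0 selects
   the real part X and byte 4 selects the imaginary part Y.  */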
6058 if (GET_CODE (op) == CONCAT)
6060 unsigned int part_size, final_offset;
6061 rtx part, res;
6063 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6064 if (byte < part_size)
6066 part = XEXP (op, 0);
6067 final_offset = byte;
6069 else
6071 part = XEXP (op, 1);
6072 final_offset = byte - part_size;
6075 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6076 return NULL_RTX;
6078 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6079 if (res)
6080 return res;
6081 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6082 return gen_rtx_SUBREG (outermode, part, final_offset);
6083 return NULL_RTX;
6086 /* A SUBREG resulting from a zero extension may fold to zero if
6087 it extracts higher bits than the ZERO_EXTEND's source bits. */
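/* E.g. on a little-endian 64-bit target,
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only bits that the
   extension filled with zeros, so it folds to (const_int 0).  */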
6088 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6090 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6091 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6092 return CONST0_RTX (outermode);
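/* Taking the lowpart SUBREG of a wider scalar integer is a truncation,
   so try the truncation simplifications as well. E.g. on a
   little-endian target, (subreg:QI (zero_extend:SI (reg:QI X)) 0)
   should fold back to (reg:QI X).  */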
6095 if (SCALAR_INT_MODE_P (outermode)
6096 && SCALAR_INT_MODE_P (innermode)
6097 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6098 && byte == subreg_lowpart_offset (outermode, innermode))
6100 rtx tem = simplify_truncation (outermode, op, innermode);
6101 if (tem)
6102 return tem;
6105 return NULL_RTX;
6108 /* Make a SUBREG operation or equivalent if it folds. */
6110 rtx
6111 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6112 enum machine_mode innermode, unsigned int byte)
6114 rtx newx;
6116 newx = simplify_subreg (outermode, op, innermode, byte);
6117 if (newx)
6118 return newx;
6120 if (GET_CODE (op) == SUBREG
6121 || GET_CODE (op) == CONCAT
6122 || GET_MODE (op) == VOIDmode)
6123 return NULL_RTX;
6125 if (validate_subreg (outermode, innermode, op, byte))
6126 return gen_rtx_SUBREG (outermode, op, byte);
6128 return NULL_RTX;
6131 /* Simplify X, an rtx expression.
6133 Return the simplified expression or NULL if no simplifications
6134 were possible.
6136 This is the preferred entry point into the simplification routines;
6137 however, we still allow passes to call the more specific routines.
6139 Right now GCC has three (yes, three) major bodies of RTL simplification
6140 code that need to be unified.
6142 1. fold_rtx in cse.c. This code uses various CSE specific
6143 information to aid in RTL simplification.
6145 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6146 it uses combine specific information to aid in RTL
6147 simplification.
6149 3. The routines in this file.
6152 Long term we want to only have one body of simplification code; to
6153 get to that state I recommend the following steps:
6155 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6156 which do not depend on pass-specific state into these routines.
6158 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6159 use this routine whenever possible.
6161 3. Allow for pass dependent state to be provided to these
6162 routines and add simplifications based on the pass dependent
6163 state. Remove code from cse.c & combine.c that becomes
6164 redundant/dead.
6166 It will take time, but ultimately the compiler will be easier to
6167 maintain and improve. It's totally silly that when we add a
6168 simplification it needs to be added to 4 places (3 for RTL
6169 simplification and 1 for tree simplification). */
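/* For example, simplify_rtx applied to
   (plus:SI (const_int 1) (const_int 1)) returns (const_int 2), while
   for (reg:SI R) it returns NULL.  */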
6171 rtx
6172 simplify_rtx (const_rtx x)
6174 const enum rtx_code code = GET_CODE (x);
6175 const enum machine_mode mode = GET_MODE (x);
6177 switch (GET_RTX_CLASS (code))
6179 case RTX_UNARY:
6180 return simplify_unary_operation (code, mode,
6181 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6182 case RTX_COMM_ARITH:
6183 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6184 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6186 /* Fall through.... */
6188 case RTX_BIN_ARITH:
6189 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6191 case RTX_TERNARY:
6192 case RTX_BITFIELD_OPS:
6193 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6194 XEXP (x, 0), XEXP (x, 1),
6195 XEXP (x, 2));
6197 case RTX_COMPARE:
6198 case RTX_COMM_COMPARE:
6199 return simplify_relational_operation (code, mode,
6200 ((GET_MODE (XEXP (x, 0))
6201 != VOIDmode)
6202 ? GET_MODE (XEXP (x, 0))
6203 : GET_MODE (XEXP (x, 1))),
6204 XEXP (x, 0),
6205 XEXP (x, 1));
6207 case RTX_EXTRA:
6208 if (code == SUBREG)
6209 return simplify_subreg (mode, SUBREG_REG (x),
6210 GET_MODE (SUBREG_REG (x)),
6211 SUBREG_BYTE (x));
6212 break;
6214 case RTX_OBJ:
6215 if (code == LO_SUM)
6217 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6218 if (GET_CODE (XEXP (x, 0)) == HIGH
6219 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6220 return XEXP (x, 1);
6222 break;
6224 default:
6225 break;
6227 return NULL;