/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
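
/* For example, on a host with 64-bit HOST_WIDE_INT, the low half of the
   pair for -2 is 0xfffffffffffffffe; read as a signed HOST_WIDE_INT that
   bit pattern is negative, so HWI_SIGN_EXTEND yields -1 for the high
   half, while a small positive low part such as 7 yields 0.  */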

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
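
/* A worked example of why the unsigned negation matters: for the SImode
   constant (const_int -2147483648), -(unsigned HOST_WIDE_INT) INTVAL
   wraps to 0x80000000 and gen_int_mode sign-extends that back to the
   mode, so the most negative value negates to itself instead of
   invoking signed overflow.  */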

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
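
/* For example, in QImode the sign bit constant is (const_int -128):
   its INTVAL masks down to 0x80, which equals (unsigned) 1 << 7.  */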

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
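
/* To contrast the three predicates above for SImode: val_signbit_p
   holds only for 0x80000000 itself, val_signbit_known_set_p for any
   value with bit 31 set (e.g. 0xffffffff), and val_signbit_known_clear_p
   for any value with bit 31 clear.  */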

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
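
/* E.g. simplify_gen_binary (PLUS, SImode, const1_rtx, const1_rtx) folds
   to (const_int 2), while (plus (const_int 4) (reg)) cannot fold and is
   reordered into the canonical (plus (reg) (const_int 4)).  */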

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc.  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
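
/* Illustration: for a MEM such as (mem/u (symbol_ref .LC0)) whose pool
   slot was created for the SFmode constant 1.0, this returns the
   CONST_DOUBLE for 1.0 itself, so callers can fold on the constant.
   (.LC0 here stands for an arbitrary constant pool label.)  */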

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
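
/* E.g. simplify_gen_unary (NEG, SImode, const1_rtx, SImode) returns
   (const_int -1) rather than building (neg:SI (const_int 1)).  */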

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call it on each subexpression
   of X; if it returns non-NULL, replace the subexpression with its return
   value and simplify the result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
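
/* For example, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (const_int 4)) does not merely substitute; the
   rebuilt expression is folded, yielding (const_int 4).  */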

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
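
/* Two instances of the rules above: (truncate:SI (sign_extend:DI
   (reg:SI x))) collapses to (reg:SI x), and
   (truncate:QI (lshiftrt:HI (sign_extend:HI (reg:QI x)) (const_int 2)))
   becomes (ashiftrt:QI (reg:QI x) (const_int 2)).  */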

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
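
/* E.g. simplify_unary_operation (NEG, SImode, const1_rtx, SImode) is
   handled by the constant folder and yields (const_int -1), while a
   non-constant operand such as (not:SI (not:SI (reg:SI x))) reaches
   the pattern-based rules below and yields (reg:SI x).  */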

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc., if the mode is BImode or the
	 result of the comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt X 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt X 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned) significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF X).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>)  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
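
/* A concrete instance of the non-constant rules above: when
   STORE_FLAG_VALUE is 1, (neg:SI (lt:SI (reg:SI x) (const_int 0)))
   becomes (ashiftrt:SI (reg:SI x) (const_int 31)), which replicates
   the sign bit across the register.  */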

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode)
			== GET_MODE_INNER (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
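
  /* For instance, (float:SF (const_int 3)) is folded here to the SFmode
     CONST_DOUBLE for 3.0, going through REAL_VALUE_FROM_INT and
     real_value_truncate.  */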

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
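
  /* The block above folds, e.g., (popcount:SI (const_int 7)) to
     (const_int 3) and (bswap:SI (const_int 0x12345678)) to
     (const_int 0x78563412).  */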

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
		      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }
1930 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1931 && SCALAR_FLOAT_MODE_P (mode)
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1934 REAL_VALUE_TYPE d, t;
1935 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1937 switch (code)
1939 case SQRT:
1940 if (HONOR_SNANS (mode) && real_isnan (&d))
1941 return 0;
1942 real_sqrt (&t, mode, &d);
1943 d = t;
1944 break;
1945 case ABS:
1946 d = real_value_abs (&d);
1947 break;
1948 case NEG:
1949 d = real_value_negate (&d);
1950 break;
1951 case FLOAT_TRUNCATE:
1952 d = real_value_truncate (mode, d);
1953 break;
1954 case FLOAT_EXTEND:
1955 /* All this does is change the mode, unless changing
1956 mode class. */
1957 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1958 real_convert (&d, mode, &d);
1959 break;
1960 case FIX:
1961 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1962 break;
1963 case NOT:
1965 long tmp[4];
1966 int i;
1968 real_to_target (tmp, &d, GET_MODE (op));
1969 for (i = 0; i < 4; i++)
1970 tmp[i] = ~tmp[i];
1971 real_from_target (&d, tmp, mode);
1972 break;
1974 default:
1975 gcc_unreachable ();
1977 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1980 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1981 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1982 && GET_MODE_CLASS (mode) == MODE_INT
1983 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1985 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1986 operators are intentionally left unspecified (to ease implementation
1987 by target backends), for consistency, this routine implements the
1988 same semantics for constant folding as used by the middle-end. */
1990 /* This was formerly used only for non-IEEE float.
1991 eggert@twinsun.com says it is safe for IEEE also. */
1992 HOST_WIDE_INT xh, xl, th, tl;
1993 REAL_VALUE_TYPE x, t;
1994 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1995 switch (code)
1997 case FIX:
1998 if (REAL_VALUE_ISNAN (x))
1999 return const0_rtx;
2001 /* Test against the signed upper bound. */
2002 if (width > HOST_BITS_PER_WIDE_INT)
2004 th = ((unsigned HOST_WIDE_INT) 1
2005 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2006 tl = -1;
2008 else
2010 th = 0;
2011 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2013 real_from_integer (&t, VOIDmode, tl, th, 0);
2014 if (REAL_VALUES_LESS (t, x))
2016 xh = th;
2017 xl = tl;
2018 break;
2021 /* Test against the signed lower bound. */
2022 if (width > HOST_BITS_PER_WIDE_INT)
2024 th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
2025 tl = 0;
2027 else
2029 th = -1;
2030 tl = HOST_WIDE_INT_M1U << (width - 1);
2032 real_from_integer (&t, VOIDmode, tl, th, 0);
2033 if (REAL_VALUES_LESS (x, t))
2035 xh = th;
2036 xl = tl;
2037 break;
2039 REAL_VALUE_TO_INT (&xl, &xh, x);
2040 break;
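/* The net effect is a saturating conversion: for a 32-bit integer
   mode, any real value above 0x7fffffff folds to 0x7fffffff, and
   anything below the most negative representable value folds to
   0x80000000.  */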
2042 case UNSIGNED_FIX:
2043 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2044 return const0_rtx;
2046 /* Test against the unsigned upper bound. */
2047 if (width == HOST_BITS_PER_DOUBLE_INT)
2049 th = -1;
2050 tl = -1;
2052 else if (width >= HOST_BITS_PER_WIDE_INT)
2054 th = ((unsigned HOST_WIDE_INT) 1
2055 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2056 tl = -1;
2058 else
2060 th = 0;
2061 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2063 real_from_integer (&t, VOIDmode, tl, th, 1);
2064 if (REAL_VALUES_LESS (t, x))
2066 xh = th;
2067 xl = tl;
2068 break;
2071 REAL_VALUE_TO_INT (&xl, &xh, x);
2072 break;
2074 default:
2075 gcc_unreachable ();
2077 return immed_double_const (xl, xh, mode);
2080 return NULL_RTX;
2083 /* Subroutine of simplify_binary_operation to simplify a binary operation
2084 CODE that can commute with byte swapping, with result mode MODE and
2085 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2086 Return zero if no simplification or canonicalization is possible. */
2088 static rtx
2089 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2090 rtx op0, rtx op1)
2092 rtx tem;
2094 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2095 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2097 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2098 simplify_gen_unary (BSWAP, mode, op1, mode));
2099 return simplify_gen_unary (BSWAP, mode, tem, mode);
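/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))): the constant mask must be
   byte-swapped along with the operand.  */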
2102 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2103 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2105 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2106 return simplify_gen_unary (BSWAP, mode, tem, mode);
2109 return NULL_RTX;
2112 /* Subroutine of simplify_binary_operation to simplify a commutative,
2113 associative binary operation CODE with result mode MODE, operating
2114 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2115 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2116 canonicalization is possible. */
2118 static rtx
2119 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2120 rtx op0, rtx op1)
2122 rtx tem;
2124 /* Linearize the operator to the left. */
2125 if (GET_CODE (op1) == code)
2127 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2128 if (GET_CODE (op0) == code)
2130 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2131 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2134 /* "a op (b op c)" becomes "(b op c) op a". */
2135 if (! swap_commutative_operands_p (op1, op0))
2136 return simplify_gen_binary (code, mode, op1, op0);
2138 tem = op0;
2139 op0 = op1;
2140 op1 = tem;
2143 if (GET_CODE (op0) == code)
2145 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
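/* For example, (plus (plus x 3) y) becomes (plus (plus x y) 3),
   pushing the constant to the outermost position where later folds
   can combine it with other constants.  */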
2146 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2148 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2149 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2152 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2153 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2154 if (tem != 0)
2155 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2157 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2158 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2159 if (tem != 0)
2160 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2163 return 0;
2167 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2168 and OP1. Return 0 if no simplification is possible.
2170 Don't use this for relational operations such as EQ or LT.
2171 Use simplify_relational_operation instead. */
2173 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2174 rtx op0, rtx op1)
2176 rtx trueop0, trueop1;
2177 rtx tem;
2179 /* Relational operations don't work here. We must know the mode
2180 of the operands in order to do the comparison correctly.
2181 Assuming a full word can give incorrect results.
2182 Consider comparing 128 with -128 in QImode. */
2183 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2184 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2186 /* Make sure the constant is second. */
2187 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2188 && swap_commutative_operands_p (op0, op1))
2190 tem = op0, op0 = op1, op1 = tem;
2193 trueop0 = avoid_constant_pool_reference (op0);
2194 trueop1 = avoid_constant_pool_reference (op1);
2196 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2197 if (tem)
2198 return tem;
2199 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2202 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2203 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2204 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2205 actual constants. */
2207 static rtx
2208 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2209 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2211 rtx tem, reversed, opleft, opright;
2212 HOST_WIDE_INT val;
2213 unsigned int width = GET_MODE_PRECISION (mode);
2215 /* Even if we can't compute a constant result,
2216 there are some cases worth simplifying. */
2218 switch (code)
2220 case PLUS:
2221 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2222 when x is NaN, infinite, or finite and nonzero. They aren't
2223 when x is -0 and the rounding mode is not towards -infinity,
2224 since (-0) + 0 is then 0. */
2225 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2226 return op0;
2228 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2229 transformations are safe even for IEEE. */
2230 if (GET_CODE (op0) == NEG)
2231 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2232 else if (GET_CODE (op1) == NEG)
2233 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2235 /* (~a) + 1 -> -a */
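/* This is the two's-complement identity -a == ~a + 1.  */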
2236 if (INTEGRAL_MODE_P (mode)
2237 && GET_CODE (op0) == NOT
2238 && trueop1 == const1_rtx)
2239 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2241 /* Handle both-operands-constant cases. We can only add
2242 CONST_INTs to constants since the sum of relocatable symbols
2243 can't be handled by most assemblers. Don't add CONST_INT
2244 to CONST_INT since overflow won't be computed properly if wider
2245 than HOST_BITS_PER_WIDE_INT. */
2247 if ((GET_CODE (op0) == CONST
2248 || GET_CODE (op0) == SYMBOL_REF
2249 || GET_CODE (op0) == LABEL_REF)
2250 && CONST_INT_P (op1))
2251 return plus_constant (mode, op0, INTVAL (op1));
2252 else if ((GET_CODE (op1) == CONST
2253 || GET_CODE (op1) == SYMBOL_REF
2254 || GET_CODE (op1) == LABEL_REF)
2255 && CONST_INT_P (op0))
2256 return plus_constant (mode, op1, INTVAL (op0));
2258 /* See if this is something like X * C - X or vice versa or
2259 if the multiplication is written as a shift. If so, we can
2260 distribute and make a new multiply, shift, or maybe just
2261 have X (if C is 2 in the example above). But don't make
2262 something more expensive than we had before. */
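/* For example, (plus (mult x 3) x) has coefficients 3 and 1 and so
   folds to (mult x 4), provided the new form is no costlier than
   the original.  */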
2264 if (SCALAR_INT_MODE_P (mode))
2266 double_int coeff0, coeff1;
2267 rtx lhs = op0, rhs = op1;
2269 coeff0 = double_int_one;
2270 coeff1 = double_int_one;
2272 if (GET_CODE (lhs) == NEG)
2274 coeff0 = double_int_minus_one;
2275 lhs = XEXP (lhs, 0);
2277 else if (GET_CODE (lhs) == MULT
2278 && CONST_INT_P (XEXP (lhs, 1)))
2280 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2281 lhs = XEXP (lhs, 0);
2283 else if (GET_CODE (lhs) == ASHIFT
2284 && CONST_INT_P (XEXP (lhs, 1))
2285 && INTVAL (XEXP (lhs, 1)) >= 0
2286 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2288 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2289 lhs = XEXP (lhs, 0);
2292 if (GET_CODE (rhs) == NEG)
2294 coeff1 = double_int_minus_one;
2295 rhs = XEXP (rhs, 0);
2297 else if (GET_CODE (rhs) == MULT
2298 && CONST_INT_P (XEXP (rhs, 1)))
2300 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2301 rhs = XEXP (rhs, 0);
2303 else if (GET_CODE (rhs) == ASHIFT
2304 && CONST_INT_P (XEXP (rhs, 1))
2305 && INTVAL (XEXP (rhs, 1)) >= 0
2306 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2308 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2309 rhs = XEXP (rhs, 0);
2312 if (rtx_equal_p (lhs, rhs))
2314 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2315 rtx coeff;
2316 double_int val;
2317 bool speed = optimize_function_for_speed_p (cfun);
2319 val = coeff0 + coeff1;
2320 coeff = immed_double_int_const (val, mode);
2322 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2323 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2324 ? tem : 0;
2328 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2329 if (CONST_SCALAR_INT_P (op1)
2330 && GET_CODE (op0) == XOR
2331 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2332 && mode_signbit_p (mode, op1))
2333 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2334 simplify_gen_binary (XOR, mode, op1,
2335 XEXP (op0, 1)));
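/* Adding the sign bit is the same as XORing it in, so e.g. in QImode
   (plus (xor x 1) 0x80) folds to (xor x 0x81).  */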
2337 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2338 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2339 && GET_CODE (op0) == MULT
2340 && GET_CODE (XEXP (op0, 0)) == NEG)
2342 rtx in1, in2;
2344 in1 = XEXP (XEXP (op0, 0), 0);
2345 in2 = XEXP (op0, 1);
2346 return simplify_gen_binary (MINUS, mode, op1,
2347 simplify_gen_binary (MULT, mode,
2348 in1, in2));
2351 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2352 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2353 is 1. */
2354 if (COMPARISON_P (op0)
2355 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2356 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2357 && (reversed = reversed_comparison (op0, mode)))
2358 return
2359 simplify_gen_unary (NEG, mode, reversed, mode);
2361 /* If one of the operands is a PLUS or a MINUS, see if we can
2362 simplify this by the associative law.
2363 Don't use the associative law for floating point.
2364 The inaccuracy makes it nonassociative,
2365 and subtle programs can break if operations are associated. */
2367 if (INTEGRAL_MODE_P (mode)
2368 && (plus_minus_operand_p (op0)
2369 || plus_minus_operand_p (op1))
2370 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2371 return tem;
2373 /* Reassociate floating point addition only when the user
2374 specifies associative math operations. */
2375 if (FLOAT_MODE_P (mode)
2376 && flag_associative_math)
2378 tem = simplify_associative_operation (code, mode, op0, op1);
2379 if (tem)
2380 return tem;
2382 break;
2384 case COMPARE:
2385 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2386 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2387 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2388 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2390 rtx xop00 = XEXP (op0, 0);
2391 rtx xop10 = XEXP (op1, 0);
2393 #ifdef HAVE_cc0
2394 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2395 #else
2396 if (REG_P (xop00) && REG_P (xop10)
2397 && GET_MODE (xop00) == GET_MODE (xop10)
2398 && REGNO (xop00) == REGNO (xop10)
2399 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2400 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2401 #endif
2402 return xop00;
2404 break;
2406 case MINUS:
2407 /* We can't assume x-x is 0 even with non-IEEE floating point,
2408 but since it is zero except in very strange circumstances, we
2409 will treat it as zero with -ffinite-math-only. */
2410 if (rtx_equal_p (trueop0, trueop1)
2411 && ! side_effects_p (op0)
2412 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2413 return CONST0_RTX (mode);
2415 /* Change subtraction from zero into negation. (0 - x) is the
2416 same as -x when x is NaN, infinite, or finite and nonzero.
2417 But if the mode has signed zeros, and does not round towards
2418 -infinity, then 0 - 0 is 0, not -0. */
2419 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2420 return simplify_gen_unary (NEG, mode, op1, mode);
2422 /* (-1 - a) is ~a. */
2423 if (trueop0 == constm1_rtx)
2424 return simplify_gen_unary (NOT, mode, op1, mode);
2426 /* Subtracting 0 has no effect unless the mode has signed zeros
2427 and supports rounding towards -infinity. In such a case,
2428 0 - 0 is -0. */
2429 if (!(HONOR_SIGNED_ZEROS (mode)
2430 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2431 && trueop1 == CONST0_RTX (mode))
2432 return op0;
2434 /* See if this is something like X * C - X or vice versa or
2435 if the multiplication is written as a shift. If so, we can
2436 distribute and make a new multiply, shift, or maybe just
2437 have X (if C is 2 in the example above). But don't make
2438 something more expensive than we had before. */
2440 if (SCALAR_INT_MODE_P (mode))
2442 double_int coeff0, negcoeff1;
2443 rtx lhs = op0, rhs = op1;
2445 coeff0 = double_int_one;
2446 negcoeff1 = double_int_minus_one;
2448 if (GET_CODE (lhs) == NEG)
2450 coeff0 = double_int_minus_one;
2451 lhs = XEXP (lhs, 0);
2453 else if (GET_CODE (lhs) == MULT
2454 && CONST_INT_P (XEXP (lhs, 1)))
2456 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2457 lhs = XEXP (lhs, 0);
2459 else if (GET_CODE (lhs) == ASHIFT
2460 && CONST_INT_P (XEXP (lhs, 1))
2461 && INTVAL (XEXP (lhs, 1)) >= 0
2462 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2464 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2465 lhs = XEXP (lhs, 0);
2468 if (GET_CODE (rhs) == NEG)
2470 negcoeff1 = double_int_one;
2471 rhs = XEXP (rhs, 0);
2473 else if (GET_CODE (rhs) == MULT
2474 && CONST_INT_P (XEXP (rhs, 1)))
2476 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2477 rhs = XEXP (rhs, 0);
2479 else if (GET_CODE (rhs) == ASHIFT
2480 && CONST_INT_P (XEXP (rhs, 1))
2481 && INTVAL (XEXP (rhs, 1)) >= 0
2482 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2484 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2485 negcoeff1 = -negcoeff1;
2486 rhs = XEXP (rhs, 0);
2489 if (rtx_equal_p (lhs, rhs))
2491 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2492 rtx coeff;
2493 double_int val;
2494 bool speed = optimize_function_for_speed_p (cfun);
2496 val = coeff0 + negcoeff1;
2497 coeff = immed_double_int_const (val, mode);
2499 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2500 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2501 ? tem : 0;
2505 /* (a - (-b)) -> (a + b). True even for IEEE. */
2506 if (GET_CODE (op1) == NEG)
2507 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2509 /* (-x - c) may be simplified as (-c - x). */
2510 if (GET_CODE (op0) == NEG
2511 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2513 tem = simplify_unary_operation (NEG, mode, op1, mode);
2514 if (tem)
2515 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2518 /* Don't let a relocatable value get a negative coeff. */
2519 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2520 return simplify_gen_binary (PLUS, mode,
2521 op0,
2522 neg_const_int (mode, op1));
2524 /* (x - (x & y)) -> (x & ~y) */
2525 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2527 if (rtx_equal_p (op0, XEXP (op1, 0)))
2529 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2530 GET_MODE (XEXP (op1, 1)));
2531 return simplify_gen_binary (AND, mode, op0, tem);
2533 if (rtx_equal_p (op0, XEXP (op1, 1)))
2535 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2536 GET_MODE (XEXP (op1, 0)));
2537 return simplify_gen_binary (AND, mode, op0, tem);
2541 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2542 by reversing the comparison code if valid. */
2543 if (STORE_FLAG_VALUE == 1
2544 && trueop0 == const1_rtx
2545 && COMPARISON_P (op1)
2546 && (reversed = reversed_comparison (op1, mode)))
2547 return reversed;
2549 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2550 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2551 && GET_CODE (op1) == MULT
2552 && GET_CODE (XEXP (op1, 0)) == NEG)
2554 rtx in1, in2;
2556 in1 = XEXP (XEXP (op1, 0), 0);
2557 in2 = XEXP (op1, 1);
2558 return simplify_gen_binary (PLUS, mode,
2559 simplify_gen_binary (MULT, mode,
2560 in1, in2),
2561 op0);
2564 /* Canonicalize (minus (neg A) (mult B C)) to
2565 (minus (mult (neg B) C) A). */
2566 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2567 && GET_CODE (op1) == MULT
2568 && GET_CODE (op0) == NEG)
2570 rtx in1, in2;
2572 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2573 in2 = XEXP (op1, 1);
2574 return simplify_gen_binary (MINUS, mode,
2575 simplify_gen_binary (MULT, mode,
2576 in1, in2),
2577 XEXP (op0, 0));
2580 /* If one of the operands is a PLUS or a MINUS, see if we can
2581 simplify this by the associative law. This will, for example,
2582 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2583 Don't use the associative law for floating point.
2584 The inaccuracy makes it nonassociative,
2585 and subtle programs can break if operations are associated. */
2587 if (INTEGRAL_MODE_P (mode)
2588 && (plus_minus_operand_p (op0)
2589 || plus_minus_operand_p (op1))
2590 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2591 return tem;
2592 break;
2594 case MULT:
2595 if (trueop1 == constm1_rtx)
2596 return simplify_gen_unary (NEG, mode, op0, mode);
2598 if (GET_CODE (op0) == NEG)
2600 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2601 /* If op1 is a MULT as well and simplify_unary_operation
2602 just moved the NEG to the second operand, simplify_gen_binary
2603 below could, through simplify_associative_operation, move
2604 the NEG around again and recurse endlessly. */
2605 if (temp
2606 && GET_CODE (op1) == MULT
2607 && GET_CODE (temp) == MULT
2608 && XEXP (op1, 0) == XEXP (temp, 0)
2609 && GET_CODE (XEXP (temp, 1)) == NEG
2610 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2611 temp = NULL_RTX;
2612 if (temp)
2613 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2615 if (GET_CODE (op1) == NEG)
2617 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2618 /* If op0 is a MULT as well and simplify_unary_operation
2619 just moved the NEG to the second operand, simplify_gen_binary
2620 below could, through simplify_associative_operation, move
2621 the NEG around again and recurse endlessly. */
2622 if (temp
2623 && GET_CODE (op0) == MULT
2624 && GET_CODE (temp) == MULT
2625 && XEXP (op0, 0) == XEXP (temp, 0)
2626 && GET_CODE (XEXP (temp, 1)) == NEG
2627 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2628 temp = NULL_RTX;
2629 if (temp)
2630 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2633 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2634 x is NaN, since x * 0 is then also NaN. Nor is it valid
2635 when the mode has signed zeros, since multiplying a negative
2636 number by 0 will give -0, not 0. */
2637 if (!HONOR_NANS (mode)
2638 && !HONOR_SIGNED_ZEROS (mode)
2639 && trueop1 == CONST0_RTX (mode)
2640 && ! side_effects_p (op0))
2641 return op1;
2643 /* In IEEE floating point, x*1 is not equivalent to x for
2644 signalling NaNs. */
2645 if (!HONOR_SNANS (mode)
2646 && trueop1 == CONST1_RTX (mode))
2647 return op0;
2649 /* Convert multiply by constant power of two into shift unless
2650 we are still generating RTL. This test is a kludge. */
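/* For example, (mult x 8) becomes (ashift x 3).  */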
2651 if (CONST_INT_P (trueop1)
2652 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2653 /* If the mode is larger than the host word size, and the
2654 uppermost bit is set, then this isn't a power of two due
2655 to implicit sign extension. */
2656 && (width <= HOST_BITS_PER_WIDE_INT
2657 || val != HOST_BITS_PER_WIDE_INT - 1))
2658 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2660 /* Likewise for multipliers wider than a word. */
2661 if (CONST_DOUBLE_AS_INT_P (trueop1)
2662 && GET_MODE (op0) == mode
2663 && CONST_DOUBLE_LOW (trueop1) == 0
2664 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2665 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2666 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2667 return simplify_gen_binary (ASHIFT, mode, op0,
2668 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2670 /* x*2 is x+x and x*(-1) is -x */
2671 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2672 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2673 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2674 && GET_MODE (op0) == mode)
2676 REAL_VALUE_TYPE d;
2677 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2679 if (REAL_VALUES_EQUAL (d, dconst2))
2680 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2682 if (!HONOR_SNANS (mode)
2683 && REAL_VALUES_EQUAL (d, dconstm1))
2684 return simplify_gen_unary (NEG, mode, op0, mode);
2687 /* Optimize -x * -x as x * x. */
2688 if (FLOAT_MODE_P (mode)
2689 && GET_CODE (op0) == NEG
2690 && GET_CODE (op1) == NEG
2691 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2692 && !side_effects_p (XEXP (op0, 0)))
2693 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2695 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2696 if (SCALAR_FLOAT_MODE_P (mode)
2697 && GET_CODE (op0) == ABS
2698 && GET_CODE (op1) == ABS
2699 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2700 && !side_effects_p (XEXP (op0, 0)))
2701 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2703 /* Reassociate multiplication, but for floating point MULTs
2704 only when the user specifies unsafe math optimizations. */
2705 if (! FLOAT_MODE_P (mode)
2706 || flag_unsafe_math_optimizations)
2708 tem = simplify_associative_operation (code, mode, op0, op1);
2709 if (tem)
2710 return tem;
2712 break;
2714 case IOR:
2715 if (trueop1 == CONST0_RTX (mode))
2716 return op0;
2717 if (INTEGRAL_MODE_P (mode)
2718 && trueop1 == CONSTM1_RTX (mode)
2719 && !side_effects_p (op0))
2720 return op1;
2721 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2722 return op0;
2723 /* A | (~A) -> -1 */
2724 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2725 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2726 && ! side_effects_p (op0)
2727 && SCALAR_INT_MODE_P (mode))
2728 return constm1_rtx;
2730 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2731 if (CONST_INT_P (op1)
2732 && HWI_COMPUTABLE_MODE_P (mode)
2733 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2734 && !side_effects_p (op0))
2735 return op1;
2737 /* Canonicalize (X & C1) | C2. */
2738 if (GET_CODE (op0) == AND
2739 && CONST_INT_P (trueop1)
2740 && CONST_INT_P (XEXP (op0, 1)))
2742 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2743 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2744 HOST_WIDE_INT c2 = INTVAL (trueop1);
2746 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2747 if ((c1 & c2) == c1
2748 && !side_effects_p (XEXP (op0, 0)))
2749 return trueop1;
2751 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2752 if (((c1|c2) & mask) == mask)
2753 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2755 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
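/* For example, with C1 = 0x0f and C2 = 0x05 we have C1 & ~C2 = 0x0a,
   so (ior (and x 0x0f) 0x05) is canonicalized to
   (ior (and x 0x0a) 0x05).  */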
2756 if (((c1 & ~c2) & mask) != (c1 & mask))
2758 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2759 gen_int_mode (c1 & ~c2, mode));
2760 return simplify_gen_binary (IOR, mode, tem, op1);
2764 /* Convert (A & B) | A to A. */
2765 if (GET_CODE (op0) == AND
2766 && (rtx_equal_p (XEXP (op0, 0), op1)
2767 || rtx_equal_p (XEXP (op0, 1), op1))
2768 && ! side_effects_p (XEXP (op0, 0))
2769 && ! side_effects_p (XEXP (op0, 1)))
2770 return op1;
2772 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2773 mode size to (rotate A CX). */
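/* For example, in SImode (ior (ashift x (const_int 24))
   (lshiftrt x (const_int 8))) has 24 + 8 == 32 and so is recognized
   as (rotate x (const_int 24)).  */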
2775 if (GET_CODE (op1) == ASHIFT
2776 || GET_CODE (op1) == SUBREG)
2778 opleft = op1;
2779 opright = op0;
2781 else
2783 opright = op1;
2784 opleft = op0;
2787 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2788 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2789 && CONST_INT_P (XEXP (opleft, 1))
2790 && CONST_INT_P (XEXP (opright, 1))
2791 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2792 == GET_MODE_PRECISION (mode)))
2793 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2795 /* Same, but for ashift that has been "simplified" to a wider mode
2796 by simplify_shift_const. */
2798 if (GET_CODE (opleft) == SUBREG
2799 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2800 && GET_CODE (opright) == LSHIFTRT
2801 && GET_CODE (XEXP (opright, 0)) == SUBREG
2802 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2803 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2804 && (GET_MODE_SIZE (GET_MODE (opleft))
2805 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2806 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2807 SUBREG_REG (XEXP (opright, 0)))
2808 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2809 && CONST_INT_P (XEXP (opright, 1))
2810 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2811 == GET_MODE_PRECISION (mode)))
2812 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2813 XEXP (SUBREG_REG (opleft), 1));
2815 /* If we have (ior (and X C1) C2), simplify this by making
2816 C1 as small as possible if C1 actually changes. */
2817 if (CONST_INT_P (op1)
2818 && (HWI_COMPUTABLE_MODE_P (mode)
2819 || INTVAL (op1) > 0)
2820 && GET_CODE (op0) == AND
2821 && CONST_INT_P (XEXP (op0, 1))
2822 && CONST_INT_P (op1)
2823 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2825 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2826 gen_int_mode (UINTVAL (XEXP (op0, 1))
2827 & ~UINTVAL (op1),
2828 mode));
2829 return simplify_gen_binary (IOR, mode, tmp, op1);
2832 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2833 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2834 the PLUS does not affect any of the bits in OP1: then we can do
2835 the IOR as a PLUS and we can associate. This is valid if OP1
2836 can be safely shifted left C bits. */
2837 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2838 && GET_CODE (XEXP (op0, 0)) == PLUS
2839 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2840 && CONST_INT_P (XEXP (op0, 1))
2841 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2843 int count = INTVAL (XEXP (op0, 1));
2844 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2846 if (mask >> count == INTVAL (trueop1)
2847 && trunc_int_for_mode (mask, mode) == mask
2848 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2849 return simplify_gen_binary (ASHIFTRT, mode,
2850 plus_constant (mode, XEXP (op0, 0),
2851 mask),
2852 XEXP (op0, 1));
2855 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2856 if (tem)
2857 return tem;
2859 tem = simplify_associative_operation (code, mode, op0, op1);
2860 if (tem)
2861 return tem;
2862 break;
2864 case XOR:
2865 if (trueop1 == CONST0_RTX (mode))
2866 return op0;
2867 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2868 return simplify_gen_unary (NOT, mode, op0, mode);
2869 if (rtx_equal_p (trueop0, trueop1)
2870 && ! side_effects_p (op0)
2871 && GET_MODE_CLASS (mode) != MODE_CC)
2872 return CONST0_RTX (mode);
2874 /* Canonicalize XOR of the most significant bit to PLUS. */
2875 if (CONST_SCALAR_INT_P (op1)
2876 && mode_signbit_p (mode, op1))
2877 return simplify_gen_binary (PLUS, mode, op0, op1);
2878 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2879 if (CONST_SCALAR_INT_P (op1)
2880 && GET_CODE (op0) == PLUS
2881 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2882 && mode_signbit_p (mode, XEXP (op0, 1)))
2883 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2884 simplify_gen_binary (XOR, mode, op1,
2885 XEXP (op0, 1)));
2887 /* If we are XORing two things that have no bits in common,
2888 convert them into an IOR. This helps to detect rotation encoded
2889 using those methods and possibly other simplifications. */
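/* For example, if nonzero_bits shows that op0 can only set bits in
   0xf0 and op1 only bits in 0x0f, the XOR never sees overlapping
   ones and behaves exactly like an IOR.  */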
2891 if (HWI_COMPUTABLE_MODE_P (mode)
2892 && (nonzero_bits (op0, mode)
2893 & nonzero_bits (op1, mode)) == 0)
2894 return (simplify_gen_binary (IOR, mode, op0, op1));
2896 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2897 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2898 (NOT y). */
2900 int num_negated = 0;
2902 if (GET_CODE (op0) == NOT)
2903 num_negated++, op0 = XEXP (op0, 0);
2904 if (GET_CODE (op1) == NOT)
2905 num_negated++, op1 = XEXP (op1, 0);
2907 if (num_negated == 2)
2908 return simplify_gen_binary (XOR, mode, op0, op1);
2909 else if (num_negated == 1)
2910 return simplify_gen_unary (NOT, mode,
2911 simplify_gen_binary (XOR, mode, op0, op1),
2912 mode);
2915 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2916 correspond to a machine insn or result in further simplifications
2917 if B is a constant. */
2919 if (GET_CODE (op0) == AND
2920 && rtx_equal_p (XEXP (op0, 1), op1)
2921 && ! side_effects_p (op1))
2922 return simplify_gen_binary (AND, mode,
2923 simplify_gen_unary (NOT, mode,
2924 XEXP (op0, 0), mode),
2925 op1);
2927 else if (GET_CODE (op0) == AND
2928 && rtx_equal_p (XEXP (op0, 0), op1)
2929 && ! side_effects_p (op1))
2930 return simplify_gen_binary (AND, mode,
2931 simplify_gen_unary (NOT, mode,
2932 XEXP (op0, 1), mode),
2933 op1);
2935 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2936 we can transform like this:
2937 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2938 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2939 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2940 Attempt a few simplifications when B and C are both constants. */
2941 if (GET_CODE (op0) == AND
2942 && CONST_INT_P (op1)
2943 && CONST_INT_P (XEXP (op0, 1)))
2945 rtx a = XEXP (op0, 0);
2946 rtx b = XEXP (op0, 1);
2947 rtx c = op1;
2948 HOST_WIDE_INT bval = INTVAL (b);
2949 HOST_WIDE_INT cval = INTVAL (c);
2951 rtx na_c
2952 = simplify_binary_operation (AND, mode,
2953 simplify_gen_unary (NOT, mode, a, mode),
2954 c);
2955 if ((~cval & bval) == 0)
2957 /* Try to simplify ~A&C | ~B&C. */
2958 if (na_c != NULL_RTX)
2959 return simplify_gen_binary (IOR, mode, na_c,
2960 gen_int_mode (~bval & cval, mode));
2962 else
2964 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2965 if (na_c == const0_rtx)
2967 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2968 gen_int_mode (~cval & bval,
2969 mode));
2970 return simplify_gen_binary (IOR, mode, a_nc_b,
2971 gen_int_mode (~bval & cval,
2972 mode));
2977 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2978 comparison if STORE_FLAG_VALUE is 1. */
2979 if (STORE_FLAG_VALUE == 1
2980 && trueop1 == const1_rtx
2981 && COMPARISON_P (op0)
2982 && (reversed = reversed_comparison (op0, mode)))
2983 return reversed;
2985 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2986 is (lt foo (const_int 0)), so we can perform the above
2987 simplification if STORE_FLAG_VALUE is 1. */
2989 if (STORE_FLAG_VALUE == 1
2990 && trueop1 == const1_rtx
2991 && GET_CODE (op0) == LSHIFTRT
2992 && CONST_INT_P (XEXP (op0, 1))
2993 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2994 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2996 /* (xor (comparison foo bar) (const_int sign-bit)) can likewise
2997 become the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2998 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2999 && trueop1 == const_true_rtx
3000 && COMPARISON_P (op0)
3001 && (reversed = reversed_comparison (op0, mode)))
3002 return reversed;
3004 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3005 if (tem)
3006 return tem;
3008 tem = simplify_associative_operation (code, mode, op0, op1);
3009 if (tem)
3010 return tem;
3011 break;
3013 case AND:
3014 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3015 return trueop1;
3016 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3017 return op0;
3018 if (HWI_COMPUTABLE_MODE_P (mode))
3020 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3021 HOST_WIDE_INT nzop1;
3022 if (CONST_INT_P (trueop1))
3024 HOST_WIDE_INT val1 = INTVAL (trueop1);
3025 /* If we are turning off bits already known off in OP0, we need
3026 not do an AND. */
3027 if ((nzop0 & ~val1) == 0)
3028 return op0;
3030 nzop1 = nonzero_bits (trueop1, mode);
3031 /* If we are clearing all the nonzero bits, the result is zero. */
3032 if ((nzop1 & nzop0) == 0
3033 && !side_effects_p (op0) && !side_effects_p (op1))
3034 return CONST0_RTX (mode);
3036 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3037 && GET_MODE_CLASS (mode) != MODE_CC)
3038 return op0;
3039 /* A & (~A) -> 0 */
3040 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3041 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3042 && ! side_effects_p (op0)
3043 && GET_MODE_CLASS (mode) != MODE_CC)
3044 return CONST0_RTX (mode);
3046 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3047 there are no nonzero bits of C outside of X's mode. */
3048 if ((GET_CODE (op0) == SIGN_EXTEND
3049 || GET_CODE (op0) == ZERO_EXTEND)
3050 && CONST_INT_P (trueop1)
3051 && HWI_COMPUTABLE_MODE_P (mode)
3052 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3053 & UINTVAL (trueop1)) == 0)
3055 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3056 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3057 gen_int_mode (INTVAL (trueop1),
3058 imode));
3059 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3062 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3063 we might be able to further simplify the AND with X and potentially
3064 remove the truncation altogether. */
3065 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3067 rtx x = XEXP (op0, 0);
3068 enum machine_mode xmode = GET_MODE (x);
3069 tem = simplify_gen_binary (AND, xmode, x,
3070 gen_int_mode (INTVAL (trueop1), xmode));
3071 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3074 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3075 if (GET_CODE (op0) == IOR
3076 && CONST_INT_P (trueop1)
3077 && CONST_INT_P (XEXP (op0, 1)))
3079 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3080 return simplify_gen_binary (IOR, mode,
3081 simplify_gen_binary (AND, mode,
3082 XEXP (op0, 0), op1),
3083 gen_int_mode (tmp, mode));
3086 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3087 insn (and may simplify more). */
3088 if (GET_CODE (op0) == XOR
3089 && rtx_equal_p (XEXP (op0, 0), op1)
3090 && ! side_effects_p (op1))
3091 return simplify_gen_binary (AND, mode,
3092 simplify_gen_unary (NOT, mode,
3093 XEXP (op0, 1), mode),
3094 op1);
3096 if (GET_CODE (op0) == XOR
3097 && rtx_equal_p (XEXP (op0, 1), op1)
3098 && ! side_effects_p (op1))
3099 return simplify_gen_binary (AND, mode,
3100 simplify_gen_unary (NOT, mode,
3101 XEXP (op0, 0), mode),
3102 op1);
3104 /* Similarly for (~(A ^ B)) & A. */
3105 if (GET_CODE (op0) == NOT
3106 && GET_CODE (XEXP (op0, 0)) == XOR
3107 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3108 && ! side_effects_p (op1))
3109 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3111 if (GET_CODE (op0) == NOT
3112 && GET_CODE (XEXP (op0, 0)) == XOR
3113 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3114 && ! side_effects_p (op1))
3115 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3117 /* Convert (A | B) & A to A. */
3118 if (GET_CODE (op0) == IOR
3119 && (rtx_equal_p (XEXP (op0, 0), op1)
3120 || rtx_equal_p (XEXP (op0, 1), op1))
3121 && ! side_effects_p (XEXP (op0, 0))
3122 && ! side_effects_p (XEXP (op0, 1)))
3123 return op1;
3125 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3126 ((A & N) + B) & M -> (A + B) & M
3127 Similarly if (N & M) == 0,
3128 ((A | N) + B) & M -> (A + B) & M
3129 and for - instead of + and/or ^ instead of |.
3130 Also, if (N & M) == 0, then
3131 (A +- N) & M -> A & M. */
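/* For example, with M = 0x0f, ((a & 0xff) + b) & 0x0f folds to
   (a + b) & 0x0f: the inner AND with 0xff leaves the low four bits
   of a unchanged, and only those bits survive the outer mask.  */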
3132 if (CONST_INT_P (trueop1)
3133 && HWI_COMPUTABLE_MODE_P (mode)
3134 && ~UINTVAL (trueop1)
3135 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3136 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3138 rtx pmop[2];
3139 int which;
3141 pmop[0] = XEXP (op0, 0);
3142 pmop[1] = XEXP (op0, 1);
3144 if (CONST_INT_P (pmop[1])
3145 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3146 return simplify_gen_binary (AND, mode, pmop[0], op1);
3148 for (which = 0; which < 2; which++)
3150 tem = pmop[which];
3151 switch (GET_CODE (tem))
3153 case AND:
3154 if (CONST_INT_P (XEXP (tem, 1))
3155 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3156 == UINTVAL (trueop1))
3157 pmop[which] = XEXP (tem, 0);
3158 break;
3159 case IOR:
3160 case XOR:
3161 if (CONST_INT_P (XEXP (tem, 1))
3162 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3163 pmop[which] = XEXP (tem, 0);
3164 break;
3165 default:
3166 break;
3170 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3172 tem = simplify_gen_binary (GET_CODE (op0), mode,
3173 pmop[0], pmop[1]);
3174 return simplify_gen_binary (code, mode, tem, op1);
3178 /* (and X (ior (not X) Y)) -> (and X Y) */
3179 if (GET_CODE (op1) == IOR
3180 && GET_CODE (XEXP (op1, 0)) == NOT
3181 && op0 == XEXP (XEXP (op1, 0), 0))
3182 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3184 /* (and (ior (not X) Y) X) -> (and X Y) */
3185 if (GET_CODE (op0) == IOR
3186 && GET_CODE (XEXP (op0, 0)) == NOT
3187 && op1 == XEXP (XEXP (op0, 0), 0))
3188 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3190 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3191 if (tem)
3192 return tem;
3194 tem = simplify_associative_operation (code, mode, op0, op1);
3195 if (tem)
3196 return tem;
3197 break;
3199 case UDIV:
3200 /* 0/x is 0 (or x&0 if x has side-effects). */
3201 if (trueop0 == CONST0_RTX (mode))
3203 if (side_effects_p (op1))
3204 return simplify_gen_binary (AND, mode, op1, trueop0);
3205 return trueop0;
3207 /* x/1 is x. */
3208 if (trueop1 == CONST1_RTX (mode))
3210 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3211 if (tem)
3212 return tem;
3214 /* Convert divide by power of two into shift. */
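/* For example, (udiv x 8) becomes (lshiftrt x 3).  */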
3215 if (CONST_INT_P (trueop1)
3216 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3217 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3218 break;
3220 case DIV:
3221 /* Handle floating point and integers separately. */
3222 if (SCALAR_FLOAT_MODE_P (mode))
3224 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3225 safe for modes with NaNs, since 0.0 / 0.0 will then be
3226 NaN rather than 0.0. Nor is it safe for modes with signed
3227 zeros, since dividing 0 by a negative number gives -0.0. */
3228 if (trueop0 == CONST0_RTX (mode)
3229 && !HONOR_NANS (mode)
3230 && !HONOR_SIGNED_ZEROS (mode)
3231 && ! side_effects_p (op1))
3232 return op0;
3233 /* x/1.0 is x. */
3234 if (trueop1 == CONST1_RTX (mode)
3235 && !HONOR_SNANS (mode))
3236 return op0;
3238 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3239 && trueop1 != CONST0_RTX (mode))
3241 REAL_VALUE_TYPE d;
3242 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3244 /* x/-1.0 is -x. */
3245 if (REAL_VALUES_EQUAL (d, dconstm1)
3246 && !HONOR_SNANS (mode))
3247 return simplify_gen_unary (NEG, mode, op0, mode);
3249 /* Change FP division by a constant into multiplication.
3250 Only do this with -freciprocal-math. */
3251 if (flag_reciprocal_math
3252 && !REAL_VALUES_EQUAL (d, dconst0))
3254 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3255 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3256 return simplify_gen_binary (MULT, mode, op0, tem);
3260 else if (SCALAR_INT_MODE_P (mode))
3262 /* 0/x is 0 (or x&0 if x has side-effects). */
3263 if (trueop0 == CONST0_RTX (mode)
3264 && !cfun->can_throw_non_call_exceptions)
3266 if (side_effects_p (op1))
3267 return simplify_gen_binary (AND, mode, op1, trueop0);
3268 return trueop0;
3270 /* x/1 is x. */
3271 if (trueop1 == CONST1_RTX (mode))
3273 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3274 if (tem)
3275 return tem;
3277 /* x/-1 is -x. */
3278 if (trueop1 == constm1_rtx)
3280 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3281 if (x)
3282 return simplify_gen_unary (NEG, mode, x, mode);
3285 break;
3287 case UMOD:
3288 /* 0%x is 0 (or x&0 if x has side-effects). */
3289 if (trueop0 == CONST0_RTX (mode))
3291 if (side_effects_p (op1))
3292 return simplify_gen_binary (AND, mode, op1, trueop0);
3293 return trueop0;
3295 /* x%1 is 0 (or x&0 if x has side-effects). */
3296 if (trueop1 == CONST1_RTX (mode))
3298 if (side_effects_p (op0))
3299 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3300 return CONST0_RTX (mode);
3302 /* Implement modulus by power of two as AND. */
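/* For example, (umod x 8) becomes (and x 7).  */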
3303 if (CONST_INT_P (trueop1)
3304 && exact_log2 (UINTVAL (trueop1)) > 0)
3305 return simplify_gen_binary (AND, mode, op0,
3306 gen_int_mode (INTVAL (op1) - 1, mode));
3307 break;
3309 case MOD:
3310 /* 0%x is 0 (or x&0 if x has side-effects). */
3311 if (trueop0 == CONST0_RTX (mode))
3313 if (side_effects_p (op1))
3314 return simplify_gen_binary (AND, mode, op1, trueop0);
3315 return trueop0;
3317 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3318 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3320 if (side_effects_p (op0))
3321 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3322 return CONST0_RTX (mode);
3324 break;
3326 case ROTATERT:
3327 case ROTATE:
3328 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3329 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3330 bitsize - 1, use the other rotate direction with an amount of
3331 1 .. bitsize / 2 - 1 instead. */
3332 if (CONST_INT_P (trueop1)
3333 && IN_RANGE (INTVAL (trueop1),
3334 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3335 GET_MODE_BITSIZE (mode) - 1))
3336 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3337 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3338 - INTVAL (trueop1)));
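/* For example, in a 32-bit mode (rotate x (const_int 24)) is
   canonicalized to (rotatert x (const_int 8)), while a rotate by
   exactly 16 stays a left rotate.  */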
3339 /* FALLTHRU */
3340 case ASHIFTRT:
3341 if (trueop1 == CONST0_RTX (mode))
3342 return op0;
3343 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3344 return op0;
3345 /* Rotating ~0 always results in ~0. */
3346 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3347 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3348 && ! side_effects_p (op1))
3349 return op0;
3350 canonicalize_shift:
3351 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3353 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3354 if (val != INTVAL (op1))
3355 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
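/* For example, a 32-bit shift by 33 is rewritten as a shift by 1,
   mirroring the truncation the target applies to shift counts.  */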
3357 break;
3359 case ASHIFT:
3360 case SS_ASHIFT:
3361 case US_ASHIFT:
3362 if (trueop1 == CONST0_RTX (mode))
3363 return op0;
3364 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3365 return op0;
3366 goto canonicalize_shift;
3368 case LSHIFTRT:
3369 if (trueop1 == CONST0_RTX (mode))
3370 return op0;
3371 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3372 return op0;
3373 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3374 if (GET_CODE (op0) == CLZ
3375 && CONST_INT_P (trueop1)
3376 && STORE_FLAG_VALUE == 1
3377 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3379 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3380 unsigned HOST_WIDE_INT zero_val = 0;
3382 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3383 && zero_val == GET_MODE_PRECISION (imode)
3384 && INTVAL (trueop1) == exact_log2 (zero_val))
3385 return simplify_gen_relational (EQ, mode, imode,
3386 XEXP (op0, 0), const0_rtx);
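/* For example, with a 32-bit inner mode whose CLZ at zero is defined
   as 32: CLZ yields 0..31 for nonzero inputs and 32 only for zero,
   so shifting right by 5 produces 1 exactly when X is zero.  */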
3388 goto canonicalize_shift;
3390 case SMIN:
3391 if (width <= HOST_BITS_PER_WIDE_INT
3392 && mode_signbit_p (mode, trueop1)
3393 && ! side_effects_p (op0))
3394 return op1;
3395 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3396 return op0;
3397 tem = simplify_associative_operation (code, mode, op0, op1);
3398 if (tem)
3399 return tem;
3400 break;
3402 case SMAX:
3403 if (width <= HOST_BITS_PER_WIDE_INT
3404 && CONST_INT_P (trueop1)
3405 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3406 && ! side_effects_p (op0))
3407 return op1;
3408 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3409 return op0;
3410 tem = simplify_associative_operation (code, mode, op0, op1);
3411 if (tem)
3412 return tem;
3413 break;
3415 case UMIN:
3416 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3417 return op1;
3418 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3419 return op0;
3420 tem = simplify_associative_operation (code, mode, op0, op1);
3421 if (tem)
3422 return tem;
3423 break;
3425 case UMAX:
3426 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3427 return op1;
3428 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3429 return op0;
3430 tem = simplify_associative_operation (code, mode, op0, op1);
3431 if (tem)
3432 return tem;
3433 break;
3435 case SS_PLUS:
3436 case US_PLUS:
3437 case SS_MINUS:
3438 case US_MINUS:
3439 case SS_MULT:
3440 case US_MULT:
3441 case SS_DIV:
3442 case US_DIV:
3443 /* ??? There are simplifications that can be done. */
3444 return 0;
3446 case VEC_SELECT:
3447 if (!VECTOR_MODE_P (mode))
3449 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3450 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3451 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3452 gcc_assert (XVECLEN (trueop1, 0) == 1);
3453 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3455 if (GET_CODE (trueop0) == CONST_VECTOR)
3456 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3457 (trueop1, 0, 0)));
3459 /* Extract a scalar element from a nested VEC_SELECT expression
3460 (with optional nested VEC_CONCAT expression). Some targets
3461 (i386) extract a scalar element from a vector using a chain of
3462 nested VEC_SELECT expressions. When the input operand is a memory
3463 operand, this operation can be simplified to a simple scalar
3464 load from an offset memory address. */
3465 if (GET_CODE (trueop0) == VEC_SELECT)
3467 rtx op0 = XEXP (trueop0, 0);
3468 rtx op1 = XEXP (trueop0, 1);
3470 enum machine_mode opmode = GET_MODE (op0);
3471 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3472 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3474 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3475 int elem;
3477 rtvec vec;
3478 rtx tmp_op, tmp;
3480 gcc_assert (GET_CODE (op1) == PARALLEL);
3481 gcc_assert (i < n_elts);
3483 /* Select the element pointed to by the nested selector. */
3484 elem = INTVAL (XVECEXP (op1, 0, i));
3486 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3487 if (GET_CODE (op0) == VEC_CONCAT)
3489 rtx op00 = XEXP (op0, 0);
3490 rtx op01 = XEXP (op0, 1);
3492 enum machine_mode mode00, mode01;
3493 int n_elts00, n_elts01;
3495 mode00 = GET_MODE (op00);
3496 mode01 = GET_MODE (op01);
3498 /* Find out the number of elements in each operand. */
3499 if (VECTOR_MODE_P (mode00))
3501 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3502 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3504 else
3505 n_elts00 = 1;
3507 if (VECTOR_MODE_P (mode01))
3509 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3510 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3512 else
3513 n_elts01 = 1;
3515 gcc_assert (n_elts == n_elts00 + n_elts01);
3517 /* Select the correct operand of the VEC_CONCAT
3518 and adjust the selector. */
3519 if (elem < n_elts00)
3520 tmp_op = op00;
3521 else
3523 tmp_op = op01;
3524 elem -= n_elts00;
3527 else
3528 tmp_op = op0;
3530 vec = rtvec_alloc (1);
3531 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3533 tmp = gen_rtx_fmt_ee (code, mode,
3534 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3535 return tmp;
3537 if (GET_CODE (trueop0) == VEC_DUPLICATE
3538 && GET_MODE (XEXP (trueop0, 0)) == mode)
3539 return XEXP (trueop0, 0);
3541 else
3543 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3544 gcc_assert (GET_MODE_INNER (mode)
3545 == GET_MODE_INNER (GET_MODE (trueop0)));
3546 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3548 if (GET_CODE (trueop0) == CONST_VECTOR)
3550 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3551 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3552 rtvec v = rtvec_alloc (n_elts);
3553 unsigned int i;
3555 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3556 for (i = 0; i < n_elts; i++)
3558 rtx x = XVECEXP (trueop1, 0, i);
3560 gcc_assert (CONST_INT_P (x));
3561 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3562 INTVAL (x));
3565 return gen_rtx_CONST_VECTOR (mode, v);
3568 /* Recognize the identity. */
3569 if (GET_MODE (trueop0) == mode)
3571 bool maybe_ident = true;
3572 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3574 rtx j = XVECEXP (trueop1, 0, i);
3575 if (!CONST_INT_P (j) || INTVAL (j) != i)
3577 maybe_ident = false;
3578 break;
3581 if (maybe_ident)
3582 return trueop0;
3585 /* If we build {a,b} then permute it, build the result directly. */
3586 if (XVECLEN (trueop1, 0) == 2
3587 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3588 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3589 && GET_CODE (trueop0) == VEC_CONCAT
3590 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3591 && GET_MODE (XEXP (trueop0, 0)) == mode
3592 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3593 && GET_MODE (XEXP (trueop0, 1)) == mode)
3595 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3596 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3597 rtx subop0, subop1;
3599 gcc_assert (i0 < 4 && i1 < 4);
3600 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3601 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3603 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3606 if (XVECLEN (trueop1, 0) == 2
3607 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3608 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3609 && GET_CODE (trueop0) == VEC_CONCAT
3610 && GET_MODE (trueop0) == mode)
3612 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3613 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3614 rtx subop0, subop1;
3616 gcc_assert (i0 < 2 && i1 < 2);
3617 subop0 = XEXP (trueop0, i0);
3618 subop1 = XEXP (trueop0, i1);
3620 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3624 if (XVECLEN (trueop1, 0) == 1
3625 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3626 && GET_CODE (trueop0) == VEC_CONCAT)
3628 rtx vec = trueop0;
3629 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3631 /* Try to find the element in the VEC_CONCAT. */
3632 while (GET_MODE (vec) != mode
3633 && GET_CODE (vec) == VEC_CONCAT)
3635 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3636 if (offset < vec_size)
3637 vec = XEXP (vec, 0);
3638 else
3640 offset -= vec_size;
3641 vec = XEXP (vec, 1);
3643 vec = avoid_constant_pool_reference (vec);
3646 if (GET_MODE (vec) == mode)
3647 return vec;
3650 /* If we select elements in a vec_merge that all come from the same
3651 operand, select from that operand directly. */
3652 if (GET_CODE (op0) == VEC_MERGE)
3654 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3655 if (CONST_INT_P (trueop02))
3657 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3658 bool all_operand0 = true;
3659 bool all_operand1 = true;
3660 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3662 rtx j = XVECEXP (trueop1, 0, i);
3663 if (sel & (1 << UINTVAL (j)))
3664 all_operand1 = false;
3665 else
3666 all_operand0 = false;
3668 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3669 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3670 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3671 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3675 return 0;
3676 case VEC_CONCAT:
3678 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3679 ? GET_MODE (trueop0)
3680 : GET_MODE_INNER (mode));
3681 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3682 ? GET_MODE (trueop1)
3683 : GET_MODE_INNER (mode));
3685 gcc_assert (VECTOR_MODE_P (mode));
3686 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3687 == GET_MODE_SIZE (mode));
3689 if (VECTOR_MODE_P (op0_mode))
3690 gcc_assert (GET_MODE_INNER (mode)
3691 == GET_MODE_INNER (op0_mode));
3692 else
3693 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3695 if (VECTOR_MODE_P (op1_mode))
3696 gcc_assert (GET_MODE_INNER (mode)
3697 == GET_MODE_INNER (op1_mode));
3698 else
3699 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3701 if ((GET_CODE (trueop0) == CONST_VECTOR
3702 || CONST_SCALAR_INT_P (trueop0)
3703 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3704 && (GET_CODE (trueop1) == CONST_VECTOR
3705 || CONST_SCALAR_INT_P (trueop1)
3706 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3708 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3709 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3710 rtvec v = rtvec_alloc (n_elts);
3711 unsigned int i;
3712 unsigned in_n_elts = 1;
3714 if (VECTOR_MODE_P (op0_mode))
3715 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3716 for (i = 0; i < n_elts; i++)
3718 if (i < in_n_elts)
3720 if (!VECTOR_MODE_P (op0_mode))
3721 RTVEC_ELT (v, i) = trueop0;
3722 else
3723 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3725 else
3727 if (!VECTOR_MODE_P (op1_mode))
3728 RTVEC_ELT (v, i) = trueop1;
3729 else
3730 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3731 i - in_n_elts);
3735 return gen_rtx_CONST_VECTOR (mode, v);
3738 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3739 Restrict the transformation to avoid generating a VEC_SELECT with a
3740 mode unrelated to its operand. */
3741 if (GET_CODE (trueop0) == VEC_SELECT
3742 && GET_CODE (trueop1) == VEC_SELECT
3743 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3744 && GET_MODE (XEXP (trueop0, 0)) == mode)
3746 rtx par0 = XEXP (trueop0, 1);
3747 rtx par1 = XEXP (trueop1, 1);
3748 int len0 = XVECLEN (par0, 0);
3749 int len1 = XVECLEN (par1, 0);
3750 rtvec vec = rtvec_alloc (len0 + len1);
3751 for (int i = 0; i < len0; i++)
3752 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3753 for (int i = 0; i < len1; i++)
3754 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3755 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3756 gen_rtx_PARALLEL (VOIDmode, vec));
3759 return 0;
3761 default:
3762 gcc_unreachable ();
3765 return 0;
3768 rtx
3769 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3770 rtx op0, rtx op1)
3772 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3773 HOST_WIDE_INT val;
3774 unsigned int width = GET_MODE_PRECISION (mode);
3776 if (VECTOR_MODE_P (mode)
3777 && code != VEC_CONCAT
3778 && GET_CODE (op0) == CONST_VECTOR
3779 && GET_CODE (op1) == CONST_VECTOR)
3781 unsigned n_elts = GET_MODE_NUNITS (mode);
3782 enum machine_mode op0mode = GET_MODE (op0);
3783 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3784 enum machine_mode op1mode = GET_MODE (op1);
3785 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3786 rtvec v = rtvec_alloc (n_elts);
3787 unsigned int i;
3789 gcc_assert (op0_n_elts == n_elts);
3790 gcc_assert (op1_n_elts == n_elts);
3791 for (i = 0; i < n_elts; i++)
3793 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3794 CONST_VECTOR_ELT (op0, i),
3795 CONST_VECTOR_ELT (op1, i));
3796 if (!x)
3797 return 0;
3798 RTVEC_ELT (v, i) = x;
3801 return gen_rtx_CONST_VECTOR (mode, v);
3804 if (VECTOR_MODE_P (mode)
3805 && code == VEC_CONCAT
3806 && (CONST_SCALAR_INT_P (op0)
3807 || GET_CODE (op0) == CONST_FIXED
3808 || CONST_DOUBLE_AS_FLOAT_P (op0))
3809 && (CONST_SCALAR_INT_P (op1)
3810 || CONST_DOUBLE_AS_FLOAT_P (op1)
3811 || GET_CODE (op1) == CONST_FIXED))
3813 unsigned n_elts = GET_MODE_NUNITS (mode);
3814 rtvec v = rtvec_alloc (n_elts);
3816 gcc_assert (n_elts >= 2);
3817 if (n_elts == 2)
3819 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3820 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3822 RTVEC_ELT (v, 0) = op0;
3823 RTVEC_ELT (v, 1) = op1;
3825 else
3827 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3828 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3829 unsigned i;
3831 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3832 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3833 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3835 for (i = 0; i < op0_n_elts; ++i)
3836 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3837 for (i = 0; i < op1_n_elts; ++i)
3838 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3841 return gen_rtx_CONST_VECTOR (mode, v);
3844 if (SCALAR_FLOAT_MODE_P (mode)
3845 && CONST_DOUBLE_AS_FLOAT_P (op0)
3846 && CONST_DOUBLE_AS_FLOAT_P (op1)
3847 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3849 if (code == AND
3850 || code == IOR
3851 || code == XOR)
3853 long tmp0[4];
3854 long tmp1[4];
3855 REAL_VALUE_TYPE r;
3856 int i;
3858 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3859 GET_MODE (op0));
3860 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3861 GET_MODE (op1));
3862 for (i = 0; i < 4; i++)
3864 switch (code)
3866 case AND:
3867 tmp0[i] &= tmp1[i];
3868 break;
3869 case IOR:
3870 tmp0[i] |= tmp1[i];
3871 break;
3872 case XOR:
3873 tmp0[i] ^= tmp1[i];
3874 break;
3875 default:
3876 gcc_unreachable ();
3879 real_from_target (&r, tmp0, mode);
3880 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3882 else
3884 REAL_VALUE_TYPE f0, f1, value, result;
3885 bool inexact;
3887 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3888 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3889 real_convert (&f0, mode, &f0);
3890 real_convert (&f1, mode, &f1);
3892 if (HONOR_SNANS (mode)
3893 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3894 return 0;
3896 if (code == DIV
3897 && REAL_VALUES_EQUAL (f1, dconst0)
3898 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3899 return 0;
3901 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3902 && flag_trapping_math
3903 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3905 int s0 = REAL_VALUE_NEGATIVE (f0);
3906 int s1 = REAL_VALUE_NEGATIVE (f1);
3908 switch (code)
3910 case PLUS:
3911 /* Inf + -Inf = NaN plus exception. */
3912 if (s0 != s1)
3913 return 0;
3914 break;
3915 case MINUS:
3916 /* Inf - Inf = NaN plus exception. */
3917 if (s0 == s1)
3918 return 0;
3919 break;
3920 case DIV:
3921 /* Inf / Inf = NaN plus exception. */
3922 return 0;
3923 default:
3924 break;
3928 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3929 && flag_trapping_math
3930 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3931 || (REAL_VALUE_ISINF (f1)
3932 && REAL_VALUES_EQUAL (f0, dconst0))))
3933 /* Inf * 0 = NaN plus exception. */
3934 return 0;
3936 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3937 &f0, &f1);
3938 real_convert (&result, mode, &value);
3940 /* Don't constant fold this floating point operation if
3941 the result has overflowed and flag_trapping_math is set. */
3943 if (flag_trapping_math
3944 && MODE_HAS_INFINITIES (mode)
3945 && REAL_VALUE_ISINF (result)
3946 && !REAL_VALUE_ISINF (f0)
3947 && !REAL_VALUE_ISINF (f1))
3948 /* Overflow plus exception. */
3949 return 0;
3951 /* Don't constant fold this floating point operation if the
3952 result may depend upon the run-time rounding mode and
3953 flag_rounding_math is set, or if GCC's software emulation
3954 is unable to accurately represent the result. */
3956 if ((flag_rounding_math
3957 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3958 && (inexact || !real_identical (&result, &value)))
3959 return NULL_RTX;
3961 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
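/* Two illustrative uses of the folds above (examples only, not from
   the original file): AND with a constant whose bit image is
   0x7fffffff clears the SFmode sign bit, i.e. computes fabs at
   compile time; and in the arithmetic branch, 1.5 + 2.5 gives the
   exactly representable 4.0, so INEXACT is false and the fold is
   retained even under -frounding-math.  */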
3965 /* We can fold some multi-word operations. */
3966 if (GET_MODE_CLASS (mode) == MODE_INT
3967 && width == HOST_BITS_PER_DOUBLE_INT
3968 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3969 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3971 double_int o0, o1, res, tmp;
3972 bool overflow;
3974 o0 = rtx_to_double_int (op0);
3975 o1 = rtx_to_double_int (op1);
3977 switch (code)
3979 case MINUS:
3980 /* A - B == A + (-B). */
3981 o1 = -o1;
3983 /* Fall through.... */
3985 case PLUS:
3986 res = o0 + o1;
3987 break;
3989 case MULT:
3990 res = o0 * o1;
3991 break;
3993 case DIV:
3994 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3995 &tmp, &overflow);
3996 if (overflow)
3997 return 0;
3998 break;
4000 case MOD:
4001 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
4002 &res, &overflow);
4003 if (overflow)
4004 return 0;
4005 break;
4007 case UDIV:
4008 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4009 &tmp, &overflow);
4010 if (overflow)
4011 return 0;
4012 break;
4014 case UMOD:
4015 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4016 &res, &overflow);
4017 if (overflow)
4018 return 0;
4019 break;
4021 case AND:
4022 res = o0 & o1;
4023 break;
4025 case IOR:
4026 res = o0 | o1;
4027 break;
4029 case XOR:
4030 res = o0 ^ o1;
4031 break;
4033 case SMIN:
4034 res = o0.smin (o1);
4035 break;
4037 case SMAX:
4038 res = o0.smax (o1);
4039 break;
4041 case UMIN:
4042 res = o0.umin (o1);
4043 break;
4045 case UMAX:
4046 res = o0.umax (o1);
4047 break;
4049 case LSHIFTRT: case ASHIFTRT:
4050 case ASHIFT:
4051 case ROTATE: case ROTATERT:
4053 unsigned HOST_WIDE_INT cnt;
4055 if (SHIFT_COUNT_TRUNCATED)
4057 o1.high = 0;
4058 o1.low &= GET_MODE_PRECISION (mode) - 1;
4061 if (!o1.fits_uhwi ()
4062 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4063 return 0;
4065 cnt = o1.to_uhwi ();
4066 unsigned short prec = GET_MODE_PRECISION (mode);
4068 if (code == LSHIFTRT || code == ASHIFTRT)
4069 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4070 else if (code == ASHIFT)
4071 res = o0.alshift (cnt, prec);
4072 else if (code == ROTATE)
4073 res = o0.lrotate (cnt, prec);
4074 else /* code == ROTATERT */
4075 res = o0.rrotate (cnt, prec);
4077 break;
4079 default:
4080 return 0;
4083 return immed_double_int_const (res, mode);
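/* Worked example for the shift cases above (illustrative): with a
   128-bit mode and SHIFT_COUNT_TRUNCATED, an ASHIFT count of 130 is
   masked with PRECISION - 1 == 127, giving 2; without truncation the
   count fails the precision test and the fold is refused.  */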
4086 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4087 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4089 /* Get the integer argument values in two forms:
4090 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4092 arg0 = INTVAL (op0);
4093 arg1 = INTVAL (op1);
4095 if (width < HOST_BITS_PER_WIDE_INT)
4097 arg0 &= GET_MODE_MASK (mode);
4098 arg1 &= GET_MODE_MASK (mode);
4100 arg0s = arg0;
4101 if (val_signbit_known_set_p (mode, arg0s))
4102 arg0s |= ~GET_MODE_MASK (mode);
4104 arg1s = arg1;
4105 if (val_signbit_known_set_p (mode, arg1s))
4106 arg1s |= ~GET_MODE_MASK (mode);
4108 else
4110 arg0s = arg0;
4111 arg1s = arg1;
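#if 0
	  /* Illustrative sketch only; not part of the original file.
	     For a QImode operand 0x80 on a 64-bit host the two forms
	     built above are:  */
	  unsigned HOST_WIDE_INT mask = 0xff;	/* GET_MODE_MASK (QImode) */
	  HOST_WIDE_INT z = 0x80 & mask;	/* zero-extended: 128 */
	  HOST_WIDE_INT s = z | ~mask;		/* sign-extended: -128 */
	  /* DIV and MOD below consume the signed forms ARG0S/ARG1S,
	     while UDIV and UMOD consume the unsigned ARG0/ARG1.  */
#endif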
4114 /* Compute the value of the arithmetic. */
4116 switch (code)
4118 case PLUS:
4119 val = arg0s + arg1s;
4120 break;
4122 case MINUS:
4123 val = arg0s - arg1s;
4124 break;
4126 case MULT:
4127 val = arg0s * arg1s;
4128 break;
4130 case DIV:
4131 if (arg1s == 0
4132 || ((unsigned HOST_WIDE_INT) arg0s
4133 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4134 && arg1s == -1))
4135 return 0;
4136 val = arg0s / arg1s;
4137 break;
4139 case MOD:
4140 if (arg1s == 0
4141 || ((unsigned HOST_WIDE_INT) arg0s
4142 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4143 && arg1s == -1))
4144 return 0;
4145 val = arg0s % arg1s;
4146 break;
4148 case UDIV:
4149 if (arg1 == 0
4150 || ((unsigned HOST_WIDE_INT) arg0s
4151 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4152 && arg1s == -1))
4153 return 0;
4154 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4155 break;
4157 case UMOD:
4158 if (arg1 == 0
4159 || ((unsigned HOST_WIDE_INT) arg0s
4160 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4161 && arg1s == -1))
4162 return 0;
4163 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4164 break;
4166 case AND:
4167 val = arg0 & arg1;
4168 break;
4170 case IOR:
4171 val = arg0 | arg1;
4172 break;
4174 case XOR:
4175 val = arg0 ^ arg1;
4176 break;
4178 case LSHIFTRT:
4179 case ASHIFT:
4180 case ASHIFTRT:
4181 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4182 the value is in range. We can't return any old value for
4183 out-of-range arguments because either the middle-end (via
4184 shift_truncation_mask) or the back-end might be relying on
4185 target-specific knowledge. Nor can we rely on
4186 shift_truncation_mask, since the shift might not be part of an
4187 ashlM3, lshrM3 or ashrM3 instruction. */
4188 if (SHIFT_COUNT_TRUNCATED)
4189 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4190 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4191 return 0;
4193 val = (code == ASHIFT
4194 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4195 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4197 /* Sign-extend the result for arithmetic right shifts. */
4198 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4199 val |= HOST_WIDE_INT_M1U << (width - arg1);
4200 break;
4202 case ROTATERT:
4203 if (arg1 < 0)
4204 return 0;
4206 arg1 %= width;
4207 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4208 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4209 break;
4211 case ROTATE:
4212 if (arg1 < 0)
4213 return 0;
4215 arg1 %= width;
4216 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4217 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4218 break;
4220 case COMPARE:
4221 /* Do nothing here. */
4222 return 0;
4224 case SMIN:
4225 val = arg0s <= arg1s ? arg0s : arg1s;
4226 break;
4228 case UMIN:
4229 val = ((unsigned HOST_WIDE_INT) arg0
4230 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4231 break;
4233 case SMAX:
4234 val = arg0s > arg1s ? arg0s : arg1s;
4235 break;
4237 case UMAX:
4238 val = ((unsigned HOST_WIDE_INT) arg0
4239 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4240 break;
4242 case SS_PLUS:
4243 case US_PLUS:
4244 case SS_MINUS:
4245 case US_MINUS:
4246 case SS_MULT:
4247 case US_MULT:
4248 case SS_DIV:
4249 case US_DIV:
4250 case SS_ASHIFT:
4251 case US_ASHIFT:
4252 /* ??? There are simplifications that can be done. */
4253 return 0;
4255 default:
4256 gcc_unreachable ();
4259 return gen_int_mode (val, mode);
4262 return NULL_RTX;
4267 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4268 PLUS or MINUS.
4270 Rather than test for specific cases, we do this by a brute-force method
4271 and do all possible simplifications until no more changes occur. Then
4272 we rebuild the operation. */
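/* For instance (illustrative; not from the original sources),
   (plus (plus x (const_int 4)) (minus y (const_int 1))) expands into
   the flat list {x:+, 4:+, y:+, 1:-}; the negated constant becomes -1
   at once, the pairwise pass folds 4 and -1 into 3, and the result is
   rebuilt as (plus (plus x y) (const_int 3)).  */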
4274 struct simplify_plus_minus_op_data
4276 rtx op;
4277 short neg;
4280 static bool
4281 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4283 int result;
4285 result = (commutative_operand_precedence (y)
4286 - commutative_operand_precedence (x));
4287 if (result)
4288 return result > 0;
4290 /* Group together equal REGs to do more simplification. */
4291 if (REG_P (x) && REG_P (y))
4292 return REGNO (x) > REGNO (y);
4293 else
4294 return false;
4297 static rtx
4298 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4299 rtx op1)
4301 struct simplify_plus_minus_op_data ops[8];
4302 rtx result, tem;
4303 int n_ops = 2, input_ops = 2;
4304 int changed, n_constants = 0, canonicalized = 0;
4305 int i, j;
4307 memset (ops, 0, sizeof ops);
4309 /* Set up the two operands and then expand them until nothing has been
4310 changed. If we run out of room in our array, give up; this should
4311 almost never happen. */
4313 ops[0].op = op0;
4314 ops[0].neg = 0;
4315 ops[1].op = op1;
4316 ops[1].neg = (code == MINUS);
4318 do
4320 changed = 0;
4322 for (i = 0; i < n_ops; i++)
4324 rtx this_op = ops[i].op;
4325 int this_neg = ops[i].neg;
4326 enum rtx_code this_code = GET_CODE (this_op);
4328 switch (this_code)
4330 case PLUS:
4331 case MINUS:
4332 if (n_ops == 7)
4333 return NULL_RTX;
4335 ops[n_ops].op = XEXP (this_op, 1);
4336 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4337 n_ops++;
4339 ops[i].op = XEXP (this_op, 0);
4340 input_ops++;
4341 changed = 1;
4342 canonicalized |= this_neg;
4343 break;
4345 case NEG:
4346 ops[i].op = XEXP (this_op, 0);
4347 ops[i].neg = ! this_neg;
4348 changed = 1;
4349 canonicalized = 1;
4350 break;
4352 case CONST:
4353 if (n_ops < 7
4354 && GET_CODE (XEXP (this_op, 0)) == PLUS
4355 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4356 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4358 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4359 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4360 ops[n_ops].neg = this_neg;
4361 n_ops++;
4362 changed = 1;
4363 canonicalized = 1;
4365 break;
4367 case NOT:
4368 /* ~a -> (-a - 1). */
4369 if (n_ops != 7)
4371 ops[n_ops].op = CONSTM1_RTX (mode);
4372 ops[n_ops++].neg = this_neg;
4373 ops[i].op = XEXP (this_op, 0);
4374 ops[i].neg = !this_neg;
4375 changed = 1;
4376 canonicalized = 1;
4378 break;
4380 case CONST_INT:
4381 n_constants++;
4382 if (this_neg)
4384 ops[i].op = neg_const_int (mode, this_op);
4385 ops[i].neg = 0;
4386 changed = 1;
4387 canonicalized = 1;
4389 break;
4391 default:
4392 break;
4396 while (changed);
4398 if (n_constants > 1)
4399 canonicalized = 1;
4401 gcc_assert (n_ops >= 2);
4403 /* If we only have two operands, we can avoid the loops. */
4404 if (n_ops == 2)
4406 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4407 rtx lhs, rhs;
4409 /* Get the two operands. Be careful with the order, especially for
4410 the cases where code == MINUS. */
4411 if (ops[0].neg && ops[1].neg)
4413 lhs = gen_rtx_NEG (mode, ops[0].op);
4414 rhs = ops[1].op;
4416 else if (ops[0].neg)
4418 lhs = ops[1].op;
4419 rhs = ops[0].op;
4421 else
4423 lhs = ops[0].op;
4424 rhs = ops[1].op;
4427 return simplify_const_binary_operation (code, mode, lhs, rhs);
4430 /* Now simplify each pair of operands until nothing changes. */
4431 do
4433 /* Insertion sort is good enough for an eight-element array. */
4434 for (i = 1; i < n_ops; i++)
4436 struct simplify_plus_minus_op_data save;
4437 j = i - 1;
4438 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4439 continue;
4441 canonicalized = 1;
4442 save = ops[i];
4444 ops[j + 1] = ops[j];
4445 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4446 ops[j + 1] = save;
4449 changed = 0;
4450 for (i = n_ops - 1; i > 0; i--)
4451 for (j = i - 1; j >= 0; j--)
4453 rtx lhs = ops[j].op, rhs = ops[i].op;
4454 int lneg = ops[j].neg, rneg = ops[i].neg;
4456 if (lhs != 0 && rhs != 0)
4458 enum rtx_code ncode = PLUS;
4460 if (lneg != rneg)
4462 ncode = MINUS;
4463 if (lneg)
4464 tem = lhs, lhs = rhs, rhs = tem;
4466 else if (swap_commutative_operands_p (lhs, rhs))
4467 tem = lhs, lhs = rhs, rhs = tem;
4469 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4470 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4472 rtx tem_lhs, tem_rhs;
4474 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4475 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4476 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4478 if (tem && !CONSTANT_P (tem))
4479 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4481 else
4482 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4484 /* Reject "simplifications" that just wrap the two
4485 arguments in a CONST. Failure to do so can result
4486 in infinite recursion with simplify_binary_operation
4487 when it calls us to simplify CONST operations. */
4488 if (tem
4489 && ! (GET_CODE (tem) == CONST
4490 && GET_CODE (XEXP (tem, 0)) == ncode
4491 && XEXP (XEXP (tem, 0), 0) == lhs
4492 && XEXP (XEXP (tem, 0), 1) == rhs))
4494 lneg &= rneg;
4495 if (GET_CODE (tem) == NEG)
4496 tem = XEXP (tem, 0), lneg = !lneg;
4497 if (CONST_INT_P (tem) && lneg)
4498 tem = neg_const_int (mode, tem), lneg = 0;
4500 ops[i].op = tem;
4501 ops[i].neg = lneg;
4502 ops[j].op = NULL_RTX;
4503 changed = 1;
4504 canonicalized = 1;
4509 /* If nothing changed, fail. */
4510 if (!canonicalized)
4511 return NULL_RTX;
4513 /* Pack all the operands to the lower-numbered entries. */
4514 for (i = 0, j = 0; j < n_ops; j++)
4515 if (ops[j].op)
4517 ops[i] = ops[j];
4518 i++;
4520 n_ops = i;
4522 while (changed);
4524 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4525 if (n_ops == 2
4526 && CONST_INT_P (ops[1].op)
4527 && CONSTANT_P (ops[0].op)
4528 && ops[0].neg)
4529 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4531 /* We suppressed creation of trivial CONST expressions in the
4532 combination loop to avoid recursion. Create one manually now.
4533 The combination loop should have ensured that there is exactly
4534 one CONST_INT, and the sort will have ensured that it is last
4535 in the array and that any other constant will be next-to-last. */
4537 if (n_ops > 1
4538 && CONST_INT_P (ops[n_ops - 1].op)
4539 && CONSTANT_P (ops[n_ops - 2].op))
4541 rtx value = ops[n_ops - 1].op;
4542 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4543 value = neg_const_int (mode, value);
4544 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4545 INTVAL (value));
4546 n_ops--;
4549 /* Put a non-negated operand first, if possible. */
4551 for (i = 0; i < n_ops && ops[i].neg; i++)
4552 continue;
4553 if (i == n_ops)
4554 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4555 else if (i != 0)
4557 tem = ops[0].op;
4558 ops[0] = ops[i];
4559 ops[i].op = tem;
4560 ops[i].neg = 1;
4563 /* Now make the result by performing the requested operations. */
4564 result = ops[0].op;
4565 for (i = 1; i < n_ops; i++)
4566 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4567 mode, result, ops[i].op);
4569 return result;
4572 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4573 static bool
4574 plus_minus_operand_p (const_rtx x)
4576 return GET_CODE (x) == PLUS
4577 || GET_CODE (x) == MINUS
4578 || (GET_CODE (x) == CONST
4579 && GET_CODE (XEXP (x, 0)) == PLUS
4580 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4581 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4584 /* Like simplify_binary_operation except used for relational operators.
4585 MODE is the mode of the result. If MODE is VOIDmode, the two operands
4586 must not both be VOIDmode as well.
4588 CMP_MODE specifies the mode in which the comparison is done, so it is
4589 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4590 the operands or, if both are VOIDmode, the operands are compared in
4591 "infinite precision". */
4593 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4594 enum machine_mode cmp_mode, rtx op0, rtx op1)
4596 rtx tem, trueop0, trueop1;
4598 if (cmp_mode == VOIDmode)
4599 cmp_mode = GET_MODE (op0);
4600 if (cmp_mode == VOIDmode)
4601 cmp_mode = GET_MODE (op1);
4603 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4604 if (tem)
4606 if (SCALAR_FLOAT_MODE_P (mode))
4608 if (tem == const0_rtx)
4609 return CONST0_RTX (mode);
4610 #ifdef FLOAT_STORE_FLAG_VALUE
4612 REAL_VALUE_TYPE val;
4613 val = FLOAT_STORE_FLAG_VALUE (mode);
4614 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4616 #else
4617 return NULL_RTX;
4618 #endif
4620 if (VECTOR_MODE_P (mode))
4622 if (tem == const0_rtx)
4623 return CONST0_RTX (mode);
4624 #ifdef VECTOR_STORE_FLAG_VALUE
4626 int i, units;
4627 rtvec v;
4629 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4630 if (val == NULL_RTX)
4631 return NULL_RTX;
4632 if (val == const1_rtx)
4633 return CONST1_RTX (mode);
4635 units = GET_MODE_NUNITS (mode);
4636 v = rtvec_alloc (units);
4637 for (i = 0; i < units; i++)
4638 RTVEC_ELT (v, i) = val;
4639 return gen_rtx_raw_CONST_VECTOR (mode, v);
4641 #else
4642 return NULL_RTX;
4643 #endif
4646 return tem;
4649 /* For the following tests, ensure const0_rtx is op1. */
4650 if (swap_commutative_operands_p (op0, op1)
4651 || (op0 == const0_rtx && op1 != const0_rtx))
4652 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4654 /* If op0 is a compare, extract the comparison arguments from it. */
4655 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4656 return simplify_gen_relational (code, mode, VOIDmode,
4657 XEXP (op0, 0), XEXP (op0, 1));
4659 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4660 || CC0_P (op0))
4661 return NULL_RTX;
4663 trueop0 = avoid_constant_pool_reference (op0);
4664 trueop1 = avoid_constant_pool_reference (op1);
4665 return simplify_relational_operation_1 (code, mode, cmp_mode,
4666 trueop0, trueop1);
4669 /* This part of simplify_relational_operation is only used when CMP_MODE
4670 is not in class MODE_CC (i.e. it is a real comparison).
4672 MODE is the mode of the result, while CMP_MODE specifies the mode
4673 in which the comparison is done, so it is the mode of the operands. */
4675 static rtx
4676 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4677 enum machine_mode cmp_mode, rtx op0, rtx op1)
4679 enum rtx_code op0code = GET_CODE (op0);
4681 if (op1 == const0_rtx && COMPARISON_P (op0))
4683 /* If op0 is a comparison, extract the comparison arguments
4684 from it. */
4685 if (code == NE)
4687 if (GET_MODE (op0) == mode)
4688 return simplify_rtx (op0);
4689 else
4690 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4691 XEXP (op0, 0), XEXP (op0, 1));
4693 else if (code == EQ)
4695 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4696 if (new_code != UNKNOWN)
4697 return simplify_gen_relational (new_code, mode, VOIDmode,
4698 XEXP (op0, 0), XEXP (op0, 1));
4702 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4703 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4704 if ((code == LTU || code == GEU)
4705 && GET_CODE (op0) == PLUS
4706 && CONST_INT_P (XEXP (op0, 1))
4707 && (rtx_equal_p (op1, XEXP (op0, 0))
4708 || rtx_equal_p (op1, XEXP (op0, 1)))
4709 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4710 && XEXP (op0, 1) != const0_rtx)
4712 rtx new_cmp
4713 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4714 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4715 cmp_mode, XEXP (op0, 0), new_cmp);
4718 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4719 if ((code == LTU || code == GEU)
4720 && GET_CODE (op0) == PLUS
4721 && rtx_equal_p (op1, XEXP (op0, 1))
4722 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4723 && !rtx_equal_p (op1, XEXP (op0, 0)))
4724 return simplify_gen_relational (code, mode, cmp_mode, op0,
4725 copy_rtx (XEXP (op0, 0)));
4727 if (op1 == const0_rtx)
4729 /* Canonicalize (GTU x 0) as (NE x 0). */
4730 if (code == GTU)
4731 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4732 /* Canonicalize (LEU x 0) as (EQ x 0). */
4733 if (code == LEU)
4734 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4736 else if (op1 == const1_rtx)
4738 switch (code)
4740 case GE:
4741 /* Canonicalize (GE x 1) as (GT x 0). */
4742 return simplify_gen_relational (GT, mode, cmp_mode,
4743 op0, const0_rtx);
4744 case GEU:
4745 /* Canonicalize (GEU x 1) as (NE x 0). */
4746 return simplify_gen_relational (NE, mode, cmp_mode,
4747 op0, const0_rtx);
4748 case LT:
4749 /* Canonicalize (LT x 1) as (LE x 0). */
4750 return simplify_gen_relational (LE, mode, cmp_mode,
4751 op0, const0_rtx);
4752 case LTU:
4753 /* Canonicalize (LTU x 1) as (EQ x 0). */
4754 return simplify_gen_relational (EQ, mode, cmp_mode,
4755 op0, const0_rtx);
4756 default:
4757 break;
4760 else if (op1 == constm1_rtx)
4762 /* Canonicalize (LE x -1) as (LT x 0). */
4763 if (code == LE)
4764 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4765 /* Canonicalize (GT x -1) as (GE x 0). */
4766 if (code == GT)
4767 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
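/* The reasoning behind the rows above, on one example (illustrative):
   unsigned x >= 1 holds exactly when x != 0, so (geu:SI x (const_int 1))
   is rewritten as (ne:SI x (const_int 0)); the remaining rows apply the
   same off-by-one argument at the other boundary values.  */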
4770 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4771 if ((code == EQ || code == NE)
4772 && (op0code == PLUS || op0code == MINUS)
4773 && CONSTANT_P (op1)
4774 && CONSTANT_P (XEXP (op0, 1))
4775 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4777 rtx x = XEXP (op0, 0);
4778 rtx c = XEXP (op0, 1);
4779 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4780 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4782 /* Detect an infinite recursive condition, where we oscillate at this
4783 simplification case between:
4784 A + B == C <---> C - B == A,
4785 where A, B, and C are all constants with non-simplifiable expressions,
4786 usually SYMBOL_REFs. */
4787 if (GET_CODE (tem) == invcode
4788 && CONSTANT_P (x)
4789 && rtx_equal_p (c, XEXP (tem, 1)))
4790 return NULL_RTX;
4792 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4795 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4796 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4797 if (code == NE
4798 && op1 == const0_rtx
4799 && GET_MODE_CLASS (mode) == MODE_INT
4800 && cmp_mode != VOIDmode
4801 /* ??? Work-around BImode bugs in the ia64 backend. */
4802 && mode != BImode
4803 && cmp_mode != BImode
4804 && nonzero_bits (op0, cmp_mode) == 1
4805 && STORE_FLAG_VALUE == 1)
4806 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4807 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4808 : lowpart_subreg (mode, op0, cmp_mode);
4810 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4811 if ((code == EQ || code == NE)
4812 && op1 == const0_rtx
4813 && op0code == XOR)
4814 return simplify_gen_relational (code, mode, cmp_mode,
4815 XEXP (op0, 0), XEXP (op0, 1));
4817 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4818 if ((code == EQ || code == NE)
4819 && op0code == XOR
4820 && rtx_equal_p (XEXP (op0, 0), op1)
4821 && !side_effects_p (XEXP (op0, 0)))
4822 return simplify_gen_relational (code, mode, cmp_mode,
4823 XEXP (op0, 1), const0_rtx);
4825 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4826 if ((code == EQ || code == NE)
4827 && op0code == XOR
4828 && rtx_equal_p (XEXP (op0, 1), op1)
4829 && !side_effects_p (XEXP (op0, 1)))
4830 return simplify_gen_relational (code, mode, cmp_mode,
4831 XEXP (op0, 0), const0_rtx);
4833 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4834 if ((code == EQ || code == NE)
4835 && op0code == XOR
4836 && CONST_SCALAR_INT_P (op1)
4837 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4838 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4839 simplify_gen_binary (XOR, cmp_mode,
4840 XEXP (op0, 1), op1));
4842 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4843 if ((code == EQ || code == NE)
4844 && GET_CODE (op0) == BSWAP
4845 && CONST_SCALAR_INT_P (op1))
4846 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4847 simplify_gen_unary (BSWAP, cmp_mode,
4848 op1, cmp_mode));
4850 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4851 if ((code == EQ || code == NE)
4852 && GET_CODE (op0) == BSWAP
4853 && GET_CODE (op1) == BSWAP)
4854 return simplify_gen_relational (code, mode, cmp_mode,
4855 XEXP (op0, 0), XEXP (op1, 0));
4857 if (op0code == POPCOUNT && op1 == const0_rtx)
4858 switch (code)
4860 case EQ:
4861 case LE:
4862 case LEU:
4863 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4864 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4865 XEXP (op0, 0), const0_rtx);
4867 case NE:
4868 case GT:
4869 case GTU:
4870 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4871 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4872 XEXP (op0, 0), const0_rtx);
4874 default:
4875 break;
4878 return NULL_RTX;
4881 enum
4883 CMP_EQ = 1,
4884 CMP_LT = 2,
4885 CMP_GT = 4,
4886 CMP_LTU = 8,
4887 CMP_GTU = 16
4891 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4892 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4893 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4894 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4895 For floating-point comparisons, assume that the operands were ordered. */
4897 static rtx
4898 comparison_result (enum rtx_code code, int known_results)
4900 switch (code)
4902 case EQ:
4903 case UNEQ:
4904 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4905 case NE:
4906 case LTGT:
4907 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4909 case LT:
4910 case UNLT:
4911 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4912 case GE:
4913 case UNGE:
4914 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4916 case GT:
4917 case UNGT:
4918 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4919 case LE:
4920 case UNLE:
4921 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4923 case LTU:
4924 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4925 case GEU:
4926 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4928 case GTU:
4929 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4930 case LEU:
4931 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4933 case ORDERED:
4934 return const_true_rtx;
4935 case UNORDERED:
4936 return const0_rtx;
4937 default:
4938 gcc_unreachable ();
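/* Example (illustrative): KNOWN_RESULTS == CMP_EQ makes EQ, LE, GE,
   LEU and GEU fold to const_true_rtx while NE, LT, GT, LTU and GTU
   fold to const0_rtx; a signed-less/unsigned-greater pair would
   instead be passed in as CMP_LT | CMP_GTU.  */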
4942 /* Check if the given comparison (done in the given MODE) is actually a
4943 tautology or a contradiction.
4944 If no simplification is possible, this function returns zero.
4945 Otherwise, it returns either const_true_rtx or const0_rtx. */
4947 rtx
4948 simplify_const_relational_operation (enum rtx_code code,
4949 enum machine_mode mode,
4950 rtx op0, rtx op1)
4952 rtx tem;
4953 rtx trueop0;
4954 rtx trueop1;
4956 gcc_assert (mode != VOIDmode
4957 || (GET_MODE (op0) == VOIDmode
4958 && GET_MODE (op1) == VOIDmode));
4960 /* If op0 is a compare, extract the comparison arguments from it. */
4961 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4963 op1 = XEXP (op0, 1);
4964 op0 = XEXP (op0, 0);
4966 if (GET_MODE (op0) != VOIDmode)
4967 mode = GET_MODE (op0);
4968 else if (GET_MODE (op1) != VOIDmode)
4969 mode = GET_MODE (op1);
4970 else
4971 return 0;
4974 /* We can't simplify MODE_CC values since we don't know what the
4975 actual comparison is. */
4976 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4977 return 0;
4979 /* Make sure the constant is second. */
4980 if (swap_commutative_operands_p (op0, op1))
4982 tem = op0, op0 = op1, op1 = tem;
4983 code = swap_condition (code);
4986 trueop0 = avoid_constant_pool_reference (op0);
4987 trueop1 = avoid_constant_pool_reference (op1);
4989 /* For integer comparisons of A and B maybe we can simplify A - B and can
4990 then simplify a comparison of that with zero. If A and B are both either
4991 a register or a CONST_INT, this can't help; testing for these cases will
4992 prevent infinite recursion here and speed things up.
4994 We can only do this for EQ and NE comparisons as otherwise we may
4995 lose or introduce overflow, which we cannot disregard as undefined,
4996 since we do not know the signedness of the operation on either the
4997 left or the right hand side of the comparison. */
4999 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5000 && (code == EQ || code == NE)
5001 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5002 && (REG_P (op1) || CONST_INT_P (trueop1)))
5003 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5004 /* We cannot do this if tem is a nonzero address. */
5005 && ! nonzero_address_p (tem))
5006 return simplify_const_relational_operation (signed_condition (code),
5007 mode, tem, const0_rtx);
5009 if (! HONOR_NANS (mode) && code == ORDERED)
5010 return const_true_rtx;
5012 if (! HONOR_NANS (mode) && code == UNORDERED)
5013 return const0_rtx;
5015 /* For modes without NaNs, if the two operands are equal, we know the
5016 result except if they have side-effects. Even with NaNs we know
5017 the result of unordered comparisons and, if signaling NaNs are
5018 irrelevant, also the result of LT/GT/LTGT. */
5019 if ((! HONOR_NANS (GET_MODE (trueop0))
5020 || code == UNEQ || code == UNLE || code == UNGE
5021 || ((code == LT || code == GT || code == LTGT)
5022 && ! HONOR_SNANS (GET_MODE (trueop0))))
5023 && rtx_equal_p (trueop0, trueop1)
5024 && ! side_effects_p (trueop0))
5025 return comparison_result (code, CMP_EQ);
5027 /* If the operands are floating-point constants, see if we can fold
5028 the result. */
5029 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5030 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5031 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5033 REAL_VALUE_TYPE d0, d1;
5035 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5036 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5038 /* Comparisons are unordered iff at least one of the values is NaN. */
5039 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5040 switch (code)
5042 case UNEQ:
5043 case UNLT:
5044 case UNGT:
5045 case UNLE:
5046 case UNGE:
5047 case NE:
5048 case UNORDERED:
5049 return const_true_rtx;
5050 case EQ:
5051 case LT:
5052 case GT:
5053 case LE:
5054 case GE:
5055 case LTGT:
5056 case ORDERED:
5057 return const0_rtx;
5058 default:
5059 return 0;
5062 return comparison_result (code,
5063 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5064 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5067 /* Otherwise, see if the operands are both integers. */
5068 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5069 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5070 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5072 int width = GET_MODE_PRECISION (mode);
5073 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5074 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5076 /* Get the two words comprising each integer constant. */
5077 if (CONST_DOUBLE_AS_INT_P (trueop0))
5079 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5080 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5082 else
5084 l0u = l0s = INTVAL (trueop0);
5085 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5088 if (CONST_DOUBLE_AS_INT_P (trueop1))
5090 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5091 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5093 else
5095 l1u = l1s = INTVAL (trueop1);
5096 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5099 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5100 we have to sign or zero-extend the values. */
5101 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5103 l0u &= GET_MODE_MASK (mode);
5104 l1u &= GET_MODE_MASK (mode);
5106 if (val_signbit_known_set_p (mode, l0s))
5107 l0s |= ~GET_MODE_MASK (mode);
5109 if (val_signbit_known_set_p (mode, l1s))
5110 l1s |= ~GET_MODE_MASK (mode);
5112 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5113 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5115 if (h0u == h1u && l0u == l1u)
5116 return comparison_result (code, CMP_EQ);
5117 else
5119 int cr;
5120 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5121 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5122 return comparison_result (code, cr);
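/* Illustrative trace: for SImode TRUEOP0 == -1 and TRUEOP1 == 1 on a
   64-bit host the masking above yields l0u == 0xffffffff and l1u == 1,
   while sign extension yields l0s == h0s == -1 and h1s == 0; hence
   CR == CMP_LT | CMP_GTU, so (lt ...) folds to const_true_rtx and
   (ltu ...) folds to const0_rtx.  */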
5126 /* Optimize comparisons with upper and lower bounds. */
5127 if (HWI_COMPUTABLE_MODE_P (mode)
5128 && CONST_INT_P (trueop1))
5130 int sign;
5131 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5132 HOST_WIDE_INT val = INTVAL (trueop1);
5133 HOST_WIDE_INT mmin, mmax;
5135 if (code == GEU
5136 || code == LEU
5137 || code == GTU
5138 || code == LTU)
5139 sign = 0;
5140 else
5141 sign = 1;
5143 /* Get a reduced range if the sign bit is zero. */
5144 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5146 mmin = 0;
5147 mmax = nonzero;
5149 else
5151 rtx mmin_rtx, mmax_rtx;
5152 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5154 mmin = INTVAL (mmin_rtx);
5155 mmax = INTVAL (mmax_rtx);
5156 if (sign)
5158 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5160 mmin >>= (sign_copies - 1);
5161 mmax >>= (sign_copies - 1);
5165 switch (code)
5167 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5168 case GEU:
5169 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5170 return const_true_rtx;
5171 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5172 return const0_rtx;
5173 break;
5174 case GE:
5175 if (val <= mmin)
5176 return const_true_rtx;
5177 if (val > mmax)
5178 return const0_rtx;
5179 break;
5181 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5182 case LEU:
5183 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5184 return const_true_rtx;
5185 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5186 return const0_rtx;
5187 break;
5188 case LE:
5189 if (val >= mmax)
5190 return const_true_rtx;
5191 if (val < mmin)
5192 return const0_rtx;
5193 break;
5195 case EQ:
5196 /* x == y is always false for y out of range. */
5197 if (val < mmin || val > mmax)
5198 return const0_rtx;
5199 break;
5201 /* x > y is always false for y >= mmax, always true for y < mmin. */
5202 case GTU:
5203 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5204 return const0_rtx;
5205 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5206 return const_true_rtx;
5207 break;
5208 case GT:
5209 if (val >= mmax)
5210 return const0_rtx;
5211 if (val < mmin)
5212 return const_true_rtx;
5213 break;
5215 /* x < y is always false for y <= mmin, always true for y > mmax. */
5216 case LTU:
5217 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5218 return const0_rtx;
5219 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5220 return const_true_rtx;
5221 break;
5222 case LT:
5223 if (val <= mmin)
5224 return const0_rtx;
5225 if (val > mmax)
5226 return const_true_rtx;
5227 break;
5229 case NE:
5230 /* x != y is always true for y out of range. */
5231 if (val < mmin || val > mmax)
5232 return const_true_rtx;
5233 break;
5235 default:
5236 break;
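/* A concrete instance of the bounds logic above (illustrative): for
   TRUEOP0 == (and:SI x 15), nonzero_bits gives 15, so MMIN/MMAX are
   0/15 and (gtu (and:SI x 15) (const_int 15)) folds to const0_rtx
   while (leu ...) folds to const_true_rtx.  */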
5240 /* Optimize integer comparisons with zero. */
5241 if (trueop1 == const0_rtx)
5243 /* Some addresses are known to be nonzero. We don't know
5244 their sign, but equality comparisons are known. */
5245 if (nonzero_address_p (trueop0))
5247 if (code == EQ || code == LEU)
5248 return const0_rtx;
5249 if (code == NE || code == GTU)
5250 return const_true_rtx;
5253 /* See if the first operand is an IOR with a constant. If so, we
5254 may be able to determine the result of this comparison. */
5255 if (GET_CODE (op0) == IOR)
5257 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5258 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5260 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5261 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5262 && (UINTVAL (inner_const)
5263 & ((unsigned HOST_WIDE_INT) 1
5264 << sign_bitnum)));
5266 switch (code)
5268 case EQ:
5269 case LEU:
5270 return const0_rtx;
5271 case NE:
5272 case GTU:
5273 return const_true_rtx;
5274 case LT:
5275 case LE:
5276 if (has_sign)
5277 return const_true_rtx;
5278 break;
5279 case GT:
5280 case GE:
5281 if (has_sign)
5282 return const0_rtx;
5283 break;
5284 default:
5285 break;
5291 /* Optimize comparison of ABS with zero. */
5292 if (trueop1 == CONST0_RTX (mode)
5293 && (GET_CODE (trueop0) == ABS
5294 || (GET_CODE (trueop0) == FLOAT_EXTEND
5295 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5297 switch (code)
5299 case LT:
5300 /* Optimize abs(x) < 0.0. */
5301 if (!HONOR_SNANS (mode)
5302 && (!INTEGRAL_MODE_P (mode)
5303 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5305 if (INTEGRAL_MODE_P (mode)
5306 && (issue_strict_overflow_warning
5307 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5308 warning (OPT_Wstrict_overflow,
5309 ("assuming signed overflow does not occur when "
5310 "assuming abs (x) < 0 is false"));
5311 return const0_rtx;
5313 break;
5315 case GE:
5316 /* Optimize abs(x) >= 0.0. */
5317 if (!HONOR_NANS (mode)
5318 && (!INTEGRAL_MODE_P (mode)
5319 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5321 if (INTEGRAL_MODE_P (mode)
5322 && (issue_strict_overflow_warning
5323 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5324 warning (OPT_Wstrict_overflow,
5325 ("assuming signed overflow does not occur when "
5326 "assuming abs (x) >= 0 is true"));
5327 return const_true_rtx;
5329 break;
5331 case UNGE:
5332 /* Optimize ! (abs(x) < 0.0). */
5333 return const_true_rtx;
5335 default:
5336 break;
5340 return 0;
5343 /* Simplify CODE, an operation with result mode MODE and three operands,
5344 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5345 a constant. Return 0 if no simplification is possible. */
5347 rtx
5348 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5349 enum machine_mode op0_mode, rtx op0, rtx op1,
5350 rtx op2)
5352 unsigned int width = GET_MODE_PRECISION (mode);
5353 bool any_change = false;
5354 rtx tem, trueop2;
5356 /* VOIDmode means "infinite" precision. */
5357 if (width == 0)
5358 width = HOST_BITS_PER_WIDE_INT;
5360 switch (code)
5362 case FMA:
5363 /* Simplify negations around the multiplication. */
5364 /* -a * -b + c => a * b + c. */
5365 if (GET_CODE (op0) == NEG)
5367 tem = simplify_unary_operation (NEG, mode, op1, mode);
5368 if (tem)
5369 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5371 else if (GET_CODE (op1) == NEG)
5373 tem = simplify_unary_operation (NEG, mode, op0, mode);
5374 if (tem)
5375 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5378 /* Canonicalize the two multiplication operands. */
5379 /* a * -b + c => -b * a + c. */
5380 if (swap_commutative_operands_p (op0, op1))
5381 tem = op0, op0 = op1, op1 = tem, any_change = true;
5383 if (any_change)
5384 return gen_rtx_FMA (mode, op0, op1, op2);
5385 return NULL_RTX;
5387 case SIGN_EXTRACT:
5388 case ZERO_EXTRACT:
5389 if (CONST_INT_P (op0)
5390 && CONST_INT_P (op1)
5391 && CONST_INT_P (op2)
5392 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5393 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5395 /* Extracting a bit-field from a constant. */
5396 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5397 HOST_WIDE_INT op1val = INTVAL (op1);
5398 HOST_WIDE_INT op2val = INTVAL (op2);
5399 if (BITS_BIG_ENDIAN)
5400 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5401 else
5402 val >>= op2val;
5404 if (HOST_BITS_PER_WIDE_INT != op1val)
5406 /* First zero-extend. */
5407 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5408 /* If desired, propagate sign bit. */
5409 if (code == SIGN_EXTRACT
5410 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5411 != 0)
5412 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5415 return gen_int_mode (val, mode);
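/* Worked example (illustrative): on a !BITS_BIG_ENDIAN target,
   (zero_extract (const_int 0xf0) (const_int 4) (const_int 4)) shifts
   VAL right by 4 and masks it to 4 bits, giving 0xf; SIGN_EXTRACT
   additionally sees bit 3 of the field set and widens it to -1.  */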
5417 break;
5419 case IF_THEN_ELSE:
5420 if (CONST_INT_P (op0))
5421 return op0 != const0_rtx ? op1 : op2;
5423 /* Convert c ? a : a into "a". */
5424 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5425 return op1;
5427 /* Convert a != b ? a : b into "a". */
5428 if (GET_CODE (op0) == NE
5429 && ! side_effects_p (op0)
5430 && ! HONOR_NANS (mode)
5431 && ! HONOR_SIGNED_ZEROS (mode)
5432 && ((rtx_equal_p (XEXP (op0, 0), op1)
5433 && rtx_equal_p (XEXP (op0, 1), op2))
5434 || (rtx_equal_p (XEXP (op0, 0), op2)
5435 && rtx_equal_p (XEXP (op0, 1), op1))))
5436 return op1;
5438 /* Convert a == b ? a : b into "b". */
5439 if (GET_CODE (op0) == EQ
5440 && ! side_effects_p (op0)
5441 && ! HONOR_NANS (mode)
5442 && ! HONOR_SIGNED_ZEROS (mode)
5443 && ((rtx_equal_p (XEXP (op0, 0), op1)
5444 && rtx_equal_p (XEXP (op0, 1), op2))
5445 || (rtx_equal_p (XEXP (op0, 0), op2)
5446 && rtx_equal_p (XEXP (op0, 1), op1))))
5447 return op2;
5449 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5451 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5452 ? GET_MODE (XEXP (op0, 1))
5453 : GET_MODE (XEXP (op0, 0)));
5454 rtx temp;
5456 /* Look for happy constants in op1 and op2. */
5457 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5459 HOST_WIDE_INT t = INTVAL (op1);
5460 HOST_WIDE_INT f = INTVAL (op2);
5462 if (t == STORE_FLAG_VALUE && f == 0)
5463 code = GET_CODE (op0);
5464 else if (t == 0 && f == STORE_FLAG_VALUE)
5466 enum rtx_code tmp;
5467 tmp = reversed_comparison_code (op0, NULL_RTX);
5468 if (tmp == UNKNOWN)
5469 break;
5470 code = tmp;
5472 else
5473 break;
5475 return simplify_gen_relational (code, mode, cmp_mode,
5476 XEXP (op0, 0), XEXP (op0, 1));
5479 if (cmp_mode == VOIDmode)
5480 cmp_mode = op0_mode;
5481 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5482 cmp_mode, XEXP (op0, 0),
5483 XEXP (op0, 1));
5485 /* See if any simplifications were possible. */
5486 if (temp)
5488 if (CONST_INT_P (temp))
5489 return temp == const0_rtx ? op2 : op1;
5490 else
5491 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5494 break;
5496 case VEC_MERGE:
5497 gcc_assert (GET_MODE (op0) == mode);
5498 gcc_assert (GET_MODE (op1) == mode);
5499 gcc_assert (VECTOR_MODE_P (mode));
5500 trueop2 = avoid_constant_pool_reference (op2);
5501 if (CONST_INT_P (trueop2))
5503 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5504 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5505 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5506 unsigned HOST_WIDE_INT mask;
5507 if (n_elts == HOST_BITS_PER_WIDE_INT)
5508 mask = -1;
5509 else
5510 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5512 if (!(sel & mask) && !side_effects_p (op0))
5513 return op1;
5514 if ((sel & mask) == mask && !side_effects_p (op1))
5515 return op0;
5517 rtx trueop0 = avoid_constant_pool_reference (op0);
5518 rtx trueop1 = avoid_constant_pool_reference (op1);
5519 if (GET_CODE (trueop0) == CONST_VECTOR
5520 && GET_CODE (trueop1) == CONST_VECTOR)
5522 rtvec v = rtvec_alloc (n_elts);
5523 unsigned int i;
5525 for (i = 0; i < n_elts; i++)
5526 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5527 ? CONST_VECTOR_ELT (trueop0, i)
5528 : CONST_VECTOR_ELT (trueop1, i));
5529 return gen_rtx_CONST_VECTOR (mode, v);
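/* For example (illustrative): (vec_merge:V2SI [1 2] [3 4]
   (const_int 2)) takes lane 0 from the second operand and lane 1 from
   the first, folding to the constant vector [3, 2].  */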
5532 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5533 if no element from a appears in the result. */
5534 if (GET_CODE (op0) == VEC_MERGE)
5536 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5537 if (CONST_INT_P (tem))
5539 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5540 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5541 return simplify_gen_ternary (code, mode, mode,
5542 XEXP (op0, 1), op1, op2);
5543 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5544 return simplify_gen_ternary (code, mode, mode,
5545 XEXP (op0, 0), op1, op2);
5548 if (GET_CODE (op1) == VEC_MERGE)
5550 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5551 if (CONST_INT_P (tem))
5553 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5554 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5555 return simplify_gen_ternary (code, mode, mode,
5556 op0, XEXP (op1, 1), op2);
5557 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5558 return simplify_gen_ternary (code, mode, mode,
5559 op0, XEXP (op1, 0), op2);
5564 if (rtx_equal_p (op0, op1)
5565 && !side_effects_p (op2) && !side_effects_p (op1))
5566 return op0;
5568 break;
5570 default:
5571 gcc_unreachable ();
5574 return 0;
5577 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5578 or CONST_VECTOR,
5579 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5581 Works by unpacking OP into a collection of 8-bit values
5582 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5583 and then repacking them again for OUTERMODE. */
5585 static rtx
5586 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5587 enum machine_mode innermode, unsigned int byte)
5589 /* We support up to 512-bit values (for V8DFmode). */
5590 enum {
5591 max_bitsize = 512,
5592 value_bit = 8,
5593 value_mask = (1 << value_bit) - 1
5595 unsigned char value[max_bitsize / value_bit];
5596 int value_start;
5597 int i;
5598 int elem;
5600 int num_elem;
5601 rtx * elems;
5602 int elem_bitsize;
5603 rtx result_s;
5604 rtvec result_v = NULL;
5605 enum mode_class outer_class;
5606 enum machine_mode outer_submode;
5608 /* Some ports misuse CCmode. */
5609 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5610 return op;
5612 /* We have no way to represent a complex constant at the rtl level. */
5613 if (COMPLEX_MODE_P (outermode))
5614 return NULL_RTX;
5616 /* Unpack the value. */
5618 if (GET_CODE (op) == CONST_VECTOR)
5620 num_elem = CONST_VECTOR_NUNITS (op);
5621 elems = &CONST_VECTOR_ELT (op, 0);
5622 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5624 else
5626 num_elem = 1;
5627 elems = &op;
5628 elem_bitsize = max_bitsize;
5630 /* If this asserts, it is too complicated; reducing value_bit may help. */
5631 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5632 /* I don't know how to handle endianness of sub-units. */
5633 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5635 for (elem = 0; elem < num_elem; elem++)
5637 unsigned char * vp;
5638 rtx el = elems[elem];
5640 /* Vectors are kept in target memory order. (This is probably
5641 a mistake.) */
5643 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5644 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5645 / BITS_PER_UNIT);
5646 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5647 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5648 unsigned bytele = (subword_byte % UNITS_PER_WORD
5649 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5650 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5653 switch (GET_CODE (el))
5655 case CONST_INT:
5656 for (i = 0;
5657 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5658 i += value_bit)
5659 *vp++ = INTVAL (el) >> i;
5660 /* CONST_INTs are always logically sign-extended. */
5661 for (; i < elem_bitsize; i += value_bit)
5662 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5663 break;
5665 case CONST_DOUBLE:
5666 if (GET_MODE (el) == VOIDmode)
5668 unsigned char extend = 0;
5669 /* If this triggers, someone should have generated a
5670 CONST_INT instead. */
5671 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5673 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5674 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5675 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5677 *vp++
5678 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5679 i += value_bit;
5682 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5683 extend = -1;
5684 for (; i < elem_bitsize; i += value_bit)
5685 *vp++ = extend;
5687 else
5689 long tmp[max_bitsize / 32];
5690 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5692 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5693 gcc_assert (bitsize <= elem_bitsize);
5694 gcc_assert (bitsize % value_bit == 0);
5696 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5697 GET_MODE (el));
5699 /* real_to_target produces its result in words affected by
5700 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5701 and use WORDS_BIG_ENDIAN instead; see the documentation
5702 of SUBREG in rtl.texi. */
5703 for (i = 0; i < bitsize; i += value_bit)
5705 int ibase;
5706 if (WORDS_BIG_ENDIAN)
5707 ibase = bitsize - 1 - i;
5708 else
5709 ibase = i;
5710 *vp++ = tmp[ibase / 32] >> i % 32;
5713 /* It shouldn't matter what's done here, so fill it with
5714 zero. */
5715 for (; i < elem_bitsize; i += value_bit)
5716 *vp++ = 0;
5718 break;
5720 case CONST_FIXED:
5721 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5723 for (i = 0; i < elem_bitsize; i += value_bit)
5724 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5726 else
5728 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5729 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5730 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5731 i += value_bit)
5732 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5733 >> (i - HOST_BITS_PER_WIDE_INT);
5734 for (; i < elem_bitsize; i += value_bit)
5735 *vp++ = 0;
5737 break;
5739 default:
5740 gcc_unreachable ();
5744 /* Now, pick the right byte to start with. */
5745 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5746 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5747 will already have offset 0. */
5748 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5750 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5751 - byte);
5752 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5753 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5754 byte = (subword_byte % UNITS_PER_WORD
5755 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5758 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5759 so if it's become negative it will instead be very large.) */
5760 gcc_assert (byte < GET_MODE_SIZE (innermode));
5762 /* Convert from bytes to chunks of size value_bit. */
5763 value_start = byte * (BITS_PER_UNIT / value_bit);
5765 /* Re-pack the value. */
5767 if (VECTOR_MODE_P (outermode))
5769 num_elem = GET_MODE_NUNITS (outermode);
5770 result_v = rtvec_alloc (num_elem);
5771 elems = &RTVEC_ELT (result_v, 0);
5772 outer_submode = GET_MODE_INNER (outermode);
5774 else
5776 num_elem = 1;
5777 elems = &result_s;
5778 outer_submode = outermode;
5781 outer_class = GET_MODE_CLASS (outer_submode);
5782 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5784 gcc_assert (elem_bitsize % value_bit == 0);
5785 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5787 for (elem = 0; elem < num_elem; elem++)
5789 unsigned char *vp;
5791 /* Vectors are stored in target memory order. (This is probably
5792 a mistake.) */
5794 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5795 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5796 / BITS_PER_UNIT);
5797 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5798 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5799 unsigned bytele = (subword_byte % UNITS_PER_WORD
5800 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5801 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5804 switch (outer_class)
5806 case MODE_INT:
5807 case MODE_PARTIAL_INT:
5809 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5811 for (i = 0;
5812 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5813 i += value_bit)
5814 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5815 for (; i < elem_bitsize; i += value_bit)
5816 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5817 << (i - HOST_BITS_PER_WIDE_INT);
5819 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5820 know why. */
5821 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5822 elems[elem] = gen_int_mode (lo, outer_submode);
5823 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5824 elems[elem] = immed_double_const (lo, hi, outer_submode);
5825 else
5826 return NULL_RTX;
5828 break;
5830 case MODE_FLOAT:
5831 case MODE_DECIMAL_FLOAT:
5833 REAL_VALUE_TYPE r;
5834 long tmp[max_bitsize / 32];
5836 /* real_from_target wants its input in words affected by
5837 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5838 and use WORDS_BIG_ENDIAN instead; see the documentation
5839 of SUBREG in rtl.texi. */
5840 for (i = 0; i < max_bitsize / 32; i++)
5841 tmp[i] = 0;
5842 for (i = 0; i < elem_bitsize; i += value_bit)
5844 int ibase;
5845 if (WORDS_BIG_ENDIAN)
5846 ibase = elem_bitsize - 1 - i;
5847 else
5848 ibase = i;
5849 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5850 }
5852 real_from_target (&r, tmp, outer_submode);
5853 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5854 }
5855 break;
5857 case MODE_FRACT:
5858 case MODE_UFRACT:
5859 case MODE_ACCUM:
5860 case MODE_UACCUM:
5861 {
5862 FIXED_VALUE_TYPE f;
5863 f.data.low = 0;
5864 f.data.high = 0;
5865 f.mode = outer_submode;
5867 for (i = 0;
5868 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5869 i += value_bit)
5870 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5871 for (; i < elem_bitsize; i += value_bit)
5872 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5873 << (i - HOST_BITS_PER_WIDE_INT));
5875 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5876 }
5877 break;
5879 default:
5880 gcc_unreachable ();
5881 }
5882 }
5883 if (VECTOR_MODE_P (outermode))
5884 return gen_rtx_CONST_VECTOR (outermode, result_v);
5885 else
5886 return result_s;
5887 }
5889 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5890 Return 0 if no simplifications are possible. */
5891 rtx
5892 simplify_subreg (enum machine_mode outermode, rtx op,
5893 enum machine_mode innermode, unsigned int byte)
5894 {
5895 /* Little bit of sanity checking. */
5896 gcc_assert (innermode != VOIDmode);
5897 gcc_assert (outermode != VOIDmode);
5898 gcc_assert (innermode != BLKmode);
5899 gcc_assert (outermode != BLKmode);
5901 gcc_assert (GET_MODE (op) == innermode
5902 || GET_MODE (op) == VOIDmode);
5904 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5905 return NULL_RTX;
5907 if (byte >= GET_MODE_SIZE (innermode))
5908 return NULL_RTX;
5910 if (outermode == innermode && !byte)
5911 return op;
5913 if (CONST_SCALAR_INT_P (op)
5914 || CONST_DOUBLE_AS_FLOAT_P (op)
5915 || GET_CODE (op) == CONST_FIXED
5916 || GET_CODE (op) == CONST_VECTOR)
5917 return simplify_immed_subreg (outermode, op, innermode, byte);
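/* For illustration: with INNERMODE == SImode on a little-endian target,
   (subreg:HI (const_int 0x12345678) 0) simplifies here to
   (const_int 0x5678), while byte offset 2 yields (const_int 0x1234).  */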
5919 /* Changing mode twice with SUBREG => just change it once,
5920 or not at all if changing back to the operand's starting mode. */
5921 if (GET_CODE (op) == SUBREG)
5922 {
5923 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5924 int final_offset = byte + SUBREG_BYTE (op);
5925 rtx newx;
5927 if (outermode == innermostmode
5928 && byte == 0 && SUBREG_BYTE (op) == 0)
5929 return SUBREG_REG (op);
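/* For illustration: (subreg:SI (subreg:SI (reg:SI x) 0) 0) is just
   (reg:SI x) by the test above, while
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes
   (subreg:QI (reg:SI x) 0) through the recursion below.  */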
5931 /* The SUBREG_BYTE represents the offset, as if the value were stored
5932 in memory. An irritating exception is the paradoxical subreg, where
5933 we define SUBREG_BYTE to be 0; on big-endian machines this value
5934 would otherwise be negative. For the moment, undo this exception. */
5935 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5936 {
5937 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5938 if (WORDS_BIG_ENDIAN)
5939 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5940 if (BYTES_BIG_ENDIAN)
5941 final_offset += difference % UNITS_PER_WORD;
5942 }
5943 if (SUBREG_BYTE (op) == 0
5944 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5945 {
5946 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5947 if (WORDS_BIG_ENDIAN)
5948 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5949 if (BYTES_BIG_ENDIAN)
5950 final_offset += difference % UNITS_PER_WORD;
5951 }
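/* For illustration (big-endian target, 4-byte words): for the outer
   paradoxical reference (subreg:DI (subreg:SI (reg:DI y) 0) 0), the
   first block above adds difference = 4 - 8 = -4 to FINAL_OFFSET,
   recovering the negative memory-style offset that the
   SUBREG_BYTE == 0 convention hides; the second block does the same
   when OP itself is paradoxical.  */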
5953 /* See whether resulting subreg will be paradoxical. */
5954 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5955 {
5956 /* In nonparadoxical subregs we can't handle negative offsets. */
5957 if (final_offset < 0)
5958 return NULL_RTX;
5959 /* Bail out in case resulting subreg would be incorrect. */
5960 if (final_offset % GET_MODE_SIZE (outermode)
5961 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5962 return NULL_RTX;
5963 }
5964 else
5965 {
5966 int offset = 0;
5967 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5969 /* In a paradoxical subreg, check whether we are still looking at
5970 the lower part of the value. If so, our SUBREG_BYTE will be 0. */
5971 if (WORDS_BIG_ENDIAN)
5972 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5973 if (BYTES_BIG_ENDIAN)
5974 offset += difference % UNITS_PER_WORD;
5975 if (offset == final_offset)
5976 final_offset = 0;
5977 else
5978 return NULL_RTX;
5979 }
5981 /* Recurse for further possible simplifications. */
5982 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5983 final_offset);
5984 if (newx)
5985 return newx;
5986 if (validate_subreg (outermode, innermostmode,
5987 SUBREG_REG (op), final_offset))
5988 {
5989 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5990 if (SUBREG_PROMOTED_VAR_P (op)
5991 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5992 && GET_MODE_CLASS (outermode) == MODE_INT
5993 && IN_RANGE (GET_MODE_SIZE (outermode),
5994 GET_MODE_SIZE (innermode),
5995 GET_MODE_SIZE (innermostmode))
5996 && subreg_lowpart_p (newx))
5997 {
5998 SUBREG_PROMOTED_VAR_P (newx) = 1;
5999 SUBREG_PROMOTED_UNSIGNED_SET
6000 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
6001 }
6002 return newx;
6003 }
6004 return NULL_RTX;
6005 }
6007 /* SUBREG of a hard register => just change the register number
6008 and/or mode. If the hard register is not valid in that mode,
6009 suppress this simplification. If the hard register is the stack,
6010 frame, or argument pointer, leave this as a SUBREG. */
6012 if (REG_P (op) && HARD_REGISTER_P (op))
6013 {
6014 unsigned int regno, final_regno;
6016 regno = REGNO (op);
6017 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6018 if (HARD_REGISTER_NUM_P (final_regno))
6019 {
6020 rtx x;
6021 int final_offset = byte;
6023 /* Adjust offset for paradoxical subregs. */
6024 if (byte == 0
6025 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6026 {
6027 int difference = (GET_MODE_SIZE (innermode)
6028 - GET_MODE_SIZE (outermode));
6029 if (WORDS_BIG_ENDIAN)
6030 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6031 if (BYTES_BIG_ENDIAN)
6032 final_offset += difference % UNITS_PER_WORD;
6033 }
6035 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6037 /* Propagate the original regno. We don't have any way to specify
6038 the offset inside the original regno, so do so only for the
6039 lowpart. The information is used only by alias analysis, which
6040 cannot grok a partial register anyway. */
6042 if (subreg_lowpart_offset (outermode, innermode) == byte)
6043 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6044 return x;
6045 }
6046 }
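/* For illustration (hypothetical register numbering): on a 32-bit
   target where (reg:DI 0) occupies hard registers 0 and 1,
   (subreg:SI (reg:DI 0) 4) can simplify directly to (reg:SI 1),
   provided simplify_subreg_regno approves the new register/mode
   pair.  */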
6048 /* If we have a SUBREG of a register that we are replacing and we are
6049 replacing it with a MEM, make a new MEM and try replacing the
6050 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6051 or if we would be widening it. */
6053 if (MEM_P (op)
6054 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6055 /* Allow splitting of volatile memory references in case we don't
6056 have an instruction to move the whole thing. */
6057 && (! MEM_VOLATILE_P (op)
6058 || ! have_insn_for (SET, innermode))
6059 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6060 return adjust_address_nv (op, outermode, byte);
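/* For illustration: (subreg:HI (mem:SI (reg:SI p)) 2) becomes the
   narrower load (mem:HI (plus:SI (reg:SI p) (const_int 2))); MEMs are
   already in memory order, so the byte offset folds straight into the
   address.  */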
6062 /* Handle complex values represented as CONCAT
6063 of real and imaginary part. */
6064 if (GET_CODE (op) == CONCAT)
6065 {
6066 unsigned int part_size, final_offset;
6067 rtx part, res;
6069 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6070 if (byte < part_size)
6071 {
6072 part = XEXP (op, 0);
6073 final_offset = byte;
6074 }
6075 else
6076 {
6077 part = XEXP (op, 1);
6078 final_offset = byte - part_size;
6079 }
6081 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6082 return NULL_RTX;
6084 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6085 if (res)
6086 return res;
6087 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6088 return gen_rtx_SUBREG (outermode, part, final_offset);
6089 return NULL_RTX;
6090 }
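/* For illustration: for a complex value (concat:SC re im) whose SFmode
   parts are 4 bytes each, byte 0 selects RE and byte 4 selects IM, so
   (subreg:SF (concat:SC re im) 4) simplifies to IM.  */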
6092 /* A SUBREG resulting from a zero extension may fold to zero if
6093 it extracts higher bits than the ZERO_EXTEND's source bits. */
6094 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6095 {
6096 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6097 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6098 return CONST0_RTX (outermode);
6099 }
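/* For illustration: on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only bits that the
   ZERO_EXTEND cleared, so it folds to (const_int 0) here.  */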
6101 if (SCALAR_INT_MODE_P (outermode)
6102 && SCALAR_INT_MODE_P (innermode)
6103 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6104 && byte == subreg_lowpart_offset (outermode, innermode))
6105 {
6106 rtx tem = simplify_truncation (outermode, op, innermode);
6107 if (tem)
6108 return tem;
6109 }
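/* For illustration: on a little-endian target the lowpart subreg
   (subreg:HI (zero_extend:SI (reg:HI x)) 0) is a truncation back to
   the source mode, which simplify_truncation folds to (reg:HI x).  */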
6111 return NULL_RTX;
6112 }
6114 /* Make a SUBREG operation or equivalent if it folds. */
6116 rtx
6117 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6118 enum machine_mode innermode, unsigned int byte)
6119 {
6120 rtx newx;
6122 newx = simplify_subreg (outermode, op, innermode, byte);
6123 if (newx)
6124 return newx;
6126 if (GET_CODE (op) == SUBREG
6127 || GET_CODE (op) == CONCAT
6128 || GET_MODE (op) == VOIDmode)
6129 return NULL_RTX;
6131 if (validate_subreg (outermode, innermode, op, byte))
6132 return gen_rtx_SUBREG (outermode, op, byte);
6134 return NULL_RTX;
6135 }
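/* For illustration: simplify_gen_subreg (QImode, x, SImode, 0) first
   tries simplify_subreg and, failing that, returns a fresh
   (subreg:QI x 0) when validate_subreg allows it; otherwise it
   returns NULL_RTX.  */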
6137 /* Simplify X, an rtx expression.
6139 Return the simplified expression or NULL if no simplifications
6140 were possible.
6142 This is the preferred entry point into the simplification routines;
6143 however, we still allow passes to call the more specific routines.
6145 Right now GCC has three (yes, three) major bodies of RTL simplification
6146 code that need to be unified.
6148 1. fold_rtx in cse.c. This code uses various CSE specific
6149 information to aid in RTL simplification.
6151 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6152 it uses combine specific information to aid in RTL
6153 simplification.
6155 3. The routines in this file.
6158 Long term we want to only have one body of simplification code; to
6159 get to that state I recommend the following steps:
6161 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6162 which do not depend on pass-specific state into these routines.
6164 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6165 use this routine whenever possible.
6167 3. Allow for pass-dependent state to be provided to these
6168 routines and add simplifications based on the pass-dependent
6169 state. Remove code from cse.c & combine.c that becomes
6170 redundant/dead.
6172 It will take time, but ultimately the compiler will be easier to
6173 maintain and improve. It's totally silly that when we add a
6174 simplification that it needs to be added to 4 places (3 for RTL
6175 simplification and 1 for tree simplification. */
6178 simplify_rtx (const_rtx x)
6179 {
6180 const enum rtx_code code = GET_CODE (x);
6181 const enum machine_mode mode = GET_MODE (x);
6183 switch (GET_RTX_CLASS (code))
6184 {
6185 case RTX_UNARY:
6186 return simplify_unary_operation (code, mode,
6187 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6188 case RTX_COMM_ARITH:
6189 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6190 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6192 /* Fall through.... */
6194 case RTX_BIN_ARITH:
6195 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6197 case RTX_TERNARY:
6198 case RTX_BITFIELD_OPS:
6199 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6200 XEXP (x, 0), XEXP (x, 1),
6201 XEXP (x, 2));
6203 case RTX_COMPARE:
6204 case RTX_COMM_COMPARE:
6205 return simplify_relational_operation (code, mode,
6206 ((GET_MODE (XEXP (x, 0))
6207 != VOIDmode)
6208 ? GET_MODE (XEXP (x, 0))
6209 : GET_MODE (XEXP (x, 1))),
6210 XEXP (x, 0),
6211 XEXP (x, 1));
6213 case RTX_EXTRA:
6214 if (code == SUBREG)
6215 return simplify_subreg (mode, SUBREG_REG (x),
6216 GET_MODE (SUBREG_REG (x)),
6217 SUBREG_BYTE (x));
6218 break;
6220 case RTX_OBJ:
6221 if (code == LO_SUM)
6222 {
6223 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6224 if (GET_CODE (XEXP (x, 0)) == HIGH
6225 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6226 return XEXP (x, 1);
6227 }
6228 break;
6230 default:
6231 break;
6232 }
6233 return NULL;
6234 }
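/* A minimal usage sketch (illustrative only; EXAMPLE_FOLD_PLUS is not
   part of this file): constant arithmetic folds through the generic
   entry point.  */
#if 0
static rtx
example_fold_plus (void)
{
  /* (plus:SI (const_int 2) (const_int 3)) dispatches to
     simplify_binary_operation and yields (const_int 5).  */
  rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
  return simplify_rtx (sum);
}
#endif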